[polly] 41d5033 - [IR] Enable opaque pointers by default

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 2 00:41:20 PDT 2022


Author: Nikita Popov
Date: 2022-06-02T09:40:56+02:00
New Revision: 41d5033eb162cb92b684855166cabfa3983b74c6

URL: https://github.com/llvm/llvm-project/commit/41d5033eb162cb92b684855166cabfa3983b74c6
DIFF: https://github.com/llvm/llvm-project/commit/41d5033eb162cb92b684855166cabfa3983b74c6.diff

LOG: [IR] Enable opaque pointers by default

This enables opaque pointers by default in LLVM. The effect of this
is twofold:

* If IR that contains *neither* explicit ptr nor %T* types is passed
  to tools, we will now use opaque pointer mode, unless
  -opaque-pointers=0 has been explicitly passed.
* Users of LLVM as a library will now default to opaque pointers.
  It is possible to opt-out by calling setOpaquePointers(false) on
  LLVMContext.

A cmake option to toggle this default will not be provided. Frontends
or other tools that want to (temporarily) keep using typed pointers
should disable opaque pointers via LLVMContext.

Differential Revision: https://reviews.llvm.org/D126689

Added: 
    

Modified: 
    clang/test/CodeGen/thinlto_backend.ll
    clang/test/CodeGenCUDA/amdgpu-asan.cu
    clang/test/Driver/clang-offload-wrapper.c
    clang/test/Driver/linker-wrapper-image.c
    llvm/docs/OpaquePointers.rst
    llvm/lib/IR/LLVMContextImpl.cpp
    llvm/test/Assembler/comment.ll
    llvm/test/Assembler/invalid-vecreduce.ll
    llvm/test/Bindings/llvm-c/globals.ll
    llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir
    llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
    llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir
    llvm/test/CodeGen/AArch64/taildup-inst-dup-loc.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-divrem.mir
    llvm/test/CodeGen/AMDGPU/merge-flat-load-store.mir
    llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir
    llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
    llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
    llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir
    llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-ropi-rwpi.mir
    llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-static.mir
    llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
    llvm/test/CodeGen/Mips/cstmaterialization/isel-materialization.ll
    llvm/test/CodeGen/PowerPC/fmf-propagation.ll
    llvm/test/CodeGen/SystemZ/regcoal-undef-lane-4-rm-cp-commuting-def.mir
    llvm/test/CodeGen/X86/bug47278.mir
    llvm/test/Instrumentation/AddressSanitizer/no-globals.ll
    llvm/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll
    llvm/test/Instrumentation/DataFlowSanitizer/arith.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll
    llvm/test/Instrumentation/DataFlowSanitizer/phi.ll
    llvm/test/Instrumentation/DataFlowSanitizer/select.ll
    llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
    llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
    llvm/test/Instrumentation/InstrOrderFile/basic.ll
    llvm/test/Instrumentation/JustMyCode/jmc-instrument-elf.ll
    llvm/test/Instrumentation/JustMyCode/jmc-instrument-x86.ll
    llvm/test/Instrumentation/JustMyCode/jmc-instrument.ll
    llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
    llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
    llvm/test/Instrumentation/MemorySanitizer/abs-vector.ll
    llvm/test/Instrumentation/MemorySanitizer/array_types.ll
    llvm/test/Instrumentation/MemorySanitizer/bmi.ll
    llvm/test/Instrumentation/MemorySanitizer/clmul.ll
    llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
    llvm/test/Instrumentation/MemorySanitizer/missing_origin.ll
    llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll
    llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll
    llvm/test/Instrumentation/SanitizerCoverage/coff-comdat.ll
    llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll
    llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll
    llvm/test/Instrumentation/SanitizerCoverage/switch-tracing.ll
    llvm/test/Linker/2003-01-30-LinkerRename.ll
    llvm/test/Linker/2008-07-06-AliasFnDecl.ll
    llvm/test/Linker/2008-07-06-AliasWeakDest.ll
    llvm/test/Linker/AppendingLinkage.ll
    llvm/test/Linker/Inputs/2003-01-30-LinkerRename.ll
    llvm/test/Linker/Inputs/only-needed-debug-metadata.ll
    llvm/test/Linker/Inputs/pr26037.ll
    llvm/test/Linker/alias-2.ll
    llvm/test/Linker/alias-threadlocal.ll
    llvm/test/Linker/comdat-any.ll
    llvm/test/Linker/ctors2.ll
    llvm/test/Linker/ctors3.ll
    llvm/test/Linker/funcimport.ll
    llvm/test/Linker/ifunc.ll
    llvm/test/Linker/wrong-addrspace-gv-declaration.ll
    llvm/test/ThinLTO/X86/Inputs/import-ro-constant-bar.ll
    llvm/test/ThinLTO/X86/Inputs/module_asm.ll
    llvm/test/ThinLTO/X86/autoupgrade.ll
    llvm/test/ThinLTO/X86/deadstrip.ll
    llvm/test/ThinLTO/X86/dot-dumper2.ll
    llvm/test/ThinLTO/X86/funcimport-debug.ll
    llvm/test/ThinLTO/X86/globals-import-const-fold.ll
    llvm/test/ThinLTO/X86/import-dsolocal.ll
    llvm/test/ThinLTO/X86/import-ro-constant.ll
    llvm/test/ThinLTO/X86/index-const-prop-comdat.ll
    llvm/test/ThinLTO/X86/index-const-prop-full-lto.ll
    llvm/test/ThinLTO/X86/index-const-prop-ldst.ll
    llvm/test/ThinLTO/X86/personality-local.ll
    llvm/test/ThinLTO/X86/referenced_by_constant.ll
    llvm/test/ThinLTO/X86/weak_globals_import.ll
    llvm/test/Transforms/BlockExtractor/extract-blocks-with-groups.ll
    llvm/test/Transforms/CodeExtractor/PartialInlineAttributes.ll
    llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
    llvm/test/Transforms/CodeExtractor/PartialInlineVarArgsDebug.ll
    llvm/test/Transforms/FunctionImport/Inputs/funcimport.ll
    llvm/test/Transforms/FunctionImport/funcimport.ll
    llvm/test/Transforms/FunctionImport/funcimport_alias.ll
    llvm/test/Transforms/GCOVProfiling/atomic-counter.ll
    llvm/test/Transforms/GCOVProfiling/function-numbering.ll
    llvm/test/Transforms/GCOVProfiling/noprofile.ll
    llvm/test/Transforms/GCOVProfiling/reset.ll
    llvm/test/Transforms/HotColdSplit/phi-with-distinct-outlined-values.ll
    llvm/test/Transforms/HotColdSplit/split-phis-in-exit-blocks.ll
    llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
    llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
    llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
    llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
    llvm/test/Transforms/IROutliner/no-external-block-entries.ll
    llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
    llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
    llvm/test/Transforms/LowerTypeTests/export-alias.ll
    llvm/test/Transforms/LowerTypeTests/export-allones.ll
    llvm/test/Transforms/LowerTypeTests/export-bytearray.ll
    llvm/test/Transforms/LowerTypeTests/export-icall.ll
    llvm/test/Transforms/LowerTypeTests/export-inline.ll
    llvm/test/Transforms/LowerTypeTests/export-single.ll
    llvm/test/Transforms/LowerTypeTests/pr37625.ll
    llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll
    llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll
    llvm/test/Transforms/PGOProfile/branch1.ll
    llvm/test/Transforms/PGOProfile/branch2.ll
    llvm/test/Transforms/PGOProfile/counter_promo.ll
    llvm/test/Transforms/PGOProfile/criticaledge.ll
    llvm/test/Transforms/PGOProfile/instr_entry_bb.ll
    llvm/test/Transforms/PGOProfile/loop1.ll
    llvm/test/Transforms/PGOProfile/loop2.ll
    llvm/test/Transforms/PGOProfile/preinline.ll
    llvm/test/Transforms/PGOProfile/single_bb.ll
    llvm/test/Transforms/PGOProfile/switch.ll
    llvm/test/Transforms/PGOProfile/thinlto_indirect_call_promotion.ll
    llvm/test/Transforms/PGOProfile/thinlto_samplepgo_icp2.ll
    llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll
    llvm/test/Transforms/RewriteStatepointsForGC/call-gc-result.ll
    llvm/test/Transforms/RewriteStatepointsForGC/deopt-intrinsic-cconv.ll
    llvm/test/Transforms/RewriteStatepointsForGC/deopt-lowering-attrs.ll
    llvm/test/Transforms/SimplifyCFG/X86/disable-lookup-table.ll
    llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
    llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
    llvm/test/Transforms/SimplifyCFG/X86/switch-to-lookup-large-types.ll
    llvm/test/Transforms/SimplifyCFG/rangereduce.ll
    llvm/test/Transforms/Util/add-TLI-mappings.ll
    llvm/test/Verifier/byval-1.ll
    llvm/test/Verifier/get-active-lane-mask.ll
    llvm/test/Verifier/jumptable.ll
    llvm/test/Verifier/llvm.compiler_used-invalid-type.ll
    llvm/test/Verifier/llvm.used-invalid-type.ll
    llvm/test/Verifier/llvm.used-invalid-type2.ll
    llvm/test/Verifier/metadata-function-dbg.ll
    llvm/test/tools/llvm-extract/extract-blocks-with-groups.ll
    llvm/test/tools/llvm-link/archive-only-needed.ll
    llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
    llvm/unittests/IR/ConstantsTest.cpp
    llvm/unittests/IR/IRBuilderTest.cpp
    llvm/unittests/IR/PatternMatch.cpp
    llvm/unittests/IR/TypesTest.cpp
    llvm/unittests/IR/ValueTest.cpp
    llvm/unittests/IR/VerifierTest.cpp
    mlir/test/Target/LLVMIR/amx.mlir
    mlir/test/Target/LLVMIR/arm-sve.mlir
    mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
    mlir/test/Target/LLVMIR/llvmir-types.mlir
    mlir/test/Target/LLVMIR/llvmir.mlir
    mlir/test/Target/LLVMIR/nvvmir.mlir
    mlir/test/Target/LLVMIR/openacc-llvm.mlir
    mlir/test/Target/LLVMIR/openmp-llvm.mlir
    mlir/test/Target/LLVMIR/openmp-nested.mlir
    mlir/test/Target/LLVMIR/openmp-reduction.mlir
    polly/test/CodeGen/non-affine-exit-node-dominance.ll
    polly/test/CodeGen/non-affine-region-implicit-store.ll
    polly/test/CodeGen/out-of-scop-phi-node-use.ll
    polly/test/CodeGen/synthesizable_phi_write_after_loop.ll
    polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll

Removed: 
    


################################################################################
diff  --git a/clang/test/CodeGen/thinlto_backend.ll b/clang/test/CodeGen/thinlto_backend.ll
index c8b840e400066..dea1a8ac54cd3 100644
--- a/clang/test/CodeGen/thinlto_backend.ll
+++ b/clang/test/CodeGen/thinlto_backend.ll
@@ -53,12 +53,12 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-unknown-linux-gnu"
 
 declare void @f2()
-declare i8* @f3()
+declare ptr @f3()
 
 define void @f1() {
   call void @f2()
   ; Make sure that the backend can handle undefined references.
   ; Do an indirect call so that the undefined ref shows up in the combined index.
-  call void bitcast (i8*()* @f3 to void()*)()
+  call void @f3()
   ret void
 }

diff  --git a/clang/test/CodeGenCUDA/amdgpu-asan.cu b/clang/test/CodeGenCUDA/amdgpu-asan.cu
index 7d2431b520d6b..9c505fe2346ba 100644
--- a/clang/test/CodeGenCUDA/amdgpu-asan.cu
+++ b/clang/test/CodeGenCUDA/amdgpu-asan.cu
@@ -1,29 +1,29 @@
 // Create a sample address sanitizer bitcode library.
 
-// RUN: %clang_cc1 -no-opaque-pointers -x ir -fcuda-is-device -triple amdgcn-amd-amdhsa -emit-llvm-bc \
+// RUN: %clang_cc1 -x ir -fcuda-is-device -triple amdgcn-amd-amdhsa -emit-llvm-bc \
 // RUN:   -disable-llvm-passes -o %t.asanrtl.bc %S/Inputs/amdgpu-asanrtl.ll
 
 // Check sanitizer runtime library functions survive
 // optimizations without being removed or parameters altered.
 
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
 // RUN:   -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
 // RUN:   -mlink-bitcode-file %t.asanrtl.bc -x hip \
 // RUN:   | FileCheck -check-prefixes=ASAN %s
 
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
 // RUN:   -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
 // RUN:   -O3 -mlink-bitcode-file %t.asanrtl.bc -x hip \
 // RUN:   | FileCheck -check-prefixes=ASAN %s
 
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
 // RUN:   -fcuda-is-device -target-cpu gfx906 -x hip \
 // RUN:   | FileCheck %s
 
 // REQUIRES: amdgpu-registered-target
 
 // ASAN-DAG: define weak void @__amdgpu_device_library_preserve_asan_functions()
-// ASAN-DAG: @__amdgpu_device_library_preserve_asan_functions_ptr = weak addrspace(1) constant void ()* @__amdgpu_device_library_preserve_asan_functions
+// ASAN-DAG: @__amdgpu_device_library_preserve_asan_functions_ptr = weak addrspace(1) constant ptr @__amdgpu_device_library_preserve_asan_functions
 // ASAN-DAG: @llvm.compiler.used = {{.*}}@__amdgpu_device_library_preserve_asan_functions_ptr
 // ASAN-DAG: define weak void @__asan_report_load1(i64 %{{.*}})
 

diff  --git a/clang/test/Driver/clang-offload-wrapper.c b/clang/test/Driver/clang-offload-wrapper.c
index 1c84072fef80a..edc26e59781d3 100644
--- a/clang/test/Driver/clang-offload-wrapper.c
+++ b/clang/test/Driver/clang-offload-wrapper.c
@@ -25,9 +25,9 @@
 // ELF-WARNING: is not an ELF image, so notes cannot be added to it.
 // CHECK-IR: target triple = "x86_64-pc-linux-gnu"
 
-// CHECK-IR-DAG: [[ENTTY:%.+]] = type { i8*, i8*, i{{32|64}}, i32, i32 }
-// CHECK-IR-DAG: [[IMAGETY:%.+]] = type { i8*, i8*, [[ENTTY]]*, [[ENTTY]]* }
-// CHECK-IR-DAG: [[DESCTY:%.+]] = type { i32, [[IMAGETY]]*, [[ENTTY]]*, [[ENTTY]]* }
+// CHECK-IR-DAG: [[ENTTY:%.+]] = type { ptr, ptr, i{{32|64}}, i32, i32 }
+// CHECK-IR-DAG: [[IMAGETY:%.+]] = type { ptr, ptr, ptr, ptr }
+// CHECK-IR-DAG: [[DESCTY:%.+]] = type { i32, ptr, ptr, ptr }
 
 // CHECK-IR: [[ENTBEGIN:@.+]] = external hidden constant [[ENTTY]]
 // CHECK-IR: [[ENTEND:@.+]] = external hidden constant [[ENTTY]]
@@ -36,24 +36,24 @@
 
 // CHECK-IR: [[BIN:@.+]] = internal unnamed_addr constant [[BINTY:\[[0-9]+ x i8\]]] c"Content of device file{{.+}}"
 
-// CHECK-IR: [[IMAGES:@.+]] = internal unnamed_addr constant [1 x [[IMAGETY]]] [{{.+}} { i8* getelementptr inbounds ([[BINTY]], [[BINTY]]* [[BIN]], i64 0, i64 0), i8* getelementptr inbounds ([[BINTY]], [[BINTY]]* [[BIN]], i64 1, i64 0), [[ENTTY]]* [[ENTBEGIN]], [[ENTTY]]* [[ENTEND]] }]
+// CHECK-IR: [[IMAGES:@.+]] = internal unnamed_addr constant [1 x [[IMAGETY]]] [{{.+}} { ptr [[BIN]], ptr getelementptr inbounds ([[BINTY]], ptr [[BIN]], i64 1, i64 0), ptr [[ENTBEGIN]], ptr [[ENTEND]] }]
 
-// CHECK-IR: [[DESC:@.+]] = internal constant [[DESCTY]] { i32 1, [[IMAGETY]]* getelementptr inbounds ([1 x [[IMAGETY]]], [1 x [[IMAGETY]]]* [[IMAGES]], i64 0, i64 0), [[ENTTY]]* [[ENTBEGIN]], [[ENTTY]]* [[ENTEND]] }
+// CHECK-IR: [[DESC:@.+]] = internal constant [[DESCTY]] { i32 1, ptr [[IMAGES]], ptr [[ENTBEGIN]], ptr [[ENTEND]] }
 
-// CHECK-IR: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* [[REGFN:@.+]], i8* null }]
-// CHECK-IR: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* [[UNREGFN:@.+]], i8* null }]
+// CHECK-IR: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr [[REGFN:@.+]], ptr null }]
+// CHECK-IR: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr [[UNREGFN:@.+]], ptr null }]
 
 // CHECK-IR: define internal void [[REGFN]]()
-// CHECK-IR:   call void @__tgt_register_lib([[DESCTY]]* [[DESC]])
+// CHECK-IR:   call void @__tgt_register_lib(ptr [[DESC]])
 // CHECK-IR:   ret void
 
-// CHECK-IR: declare void @__tgt_register_lib([[DESCTY]]*)
+// CHECK-IR: declare void @__tgt_register_lib(ptr)
 
 // CHECK-IR: define internal void [[UNREGFN]]()
-// CHECK-IR:   call void @__tgt_unregister_lib([[DESCTY]]* [[DESC]])
+// CHECK-IR:   call void @__tgt_unregister_lib(ptr [[DESC]])
 // CHECK-IR:   ret void
 
-// CHECK-IR: declare void @__tgt_unregister_lib([[DESCTY]]*)
+// CHECK-IR: declare void @__tgt_unregister_lib(ptr)
 
 // Check that clang-offload-wrapper adds LLVMOMPOFFLOAD notes
 // into the ELF offload images:

diff  --git a/clang/test/Driver/linker-wrapper-image.c b/clang/test/Driver/linker-wrapper-image.c
index 98141f4658bd1..524fc2551daa2 100644
--- a/clang/test/Driver/linker-wrapper-image.c
+++ b/clang/test/Driver/linker-wrapper-image.c
@@ -12,20 +12,20 @@
 // OPENMP-NEXT: @__stop_omp_offloading_entries = external hidden constant %__tgt_offload_entry
 // OPENMP-NEXT: @__dummy.omp_offloading.entry = hidden constant [0 x %__tgt_offload_entry] zeroinitializer, section "omp_offloading_entries"
 // OPENMP-NEXT: @.omp_offloading.device_image = internal unnamed_addr constant [0 x i8] zeroinitializer
-// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.omp_offloading.device_image, i64 0, i64 0), i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.omp_offloading.device_image, i64 0, i64 0), %__tgt_offload_entry* @__start_omp_offloading_entries, %__tgt_offload_entry* @__stop_omp_offloading_entries }]
-// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, %__tgt_device_image* getelementptr inbounds ([1 x %__tgt_device_image], [1 x %__tgt_device_image]* @.omp_offloading.device_images, i64 0, i64 0), %__tgt_offload_entry* @__start_omp_offloading_entries, %__tgt_offload_entry* @__stop_omp_offloading_entries }
-// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.omp_offloading.descriptor_reg, i8* null }]
-// OPENMP-NEXT: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.omp_offloading.descriptor_unreg, i8* null }]
+// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { ptr @.omp_offloading.device_image, ptr @.omp_offloading.device_image, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }]
+// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, ptr @.omp_offloading.device_images, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }
+// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_reg, ptr null }]
+// OPENMP-NEXT: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_unreg, ptr null }]
 
 // OPENMP: define internal void @.omp_offloading.descriptor_reg() section ".text.startup" {
 // OPENMP-NEXT: entry:
-// OPENMP-NEXT:   call void @__tgt_register_lib(%__tgt_bin_desc* @.omp_offloading.descriptor)
+// OPENMP-NEXT:   call void @__tgt_register_lib(ptr @.omp_offloading.descriptor)
 // OPENMP-NEXT:   ret void
 // OPENMP-NEXT: }
 
 // OPENMP: define internal void @.omp_offloading.descriptor_unreg() section ".text.startup" {
 // OPENMP-NEXT: entry:
-// OPENMP-NEXT:   call void @__tgt_unregister_lib(%__tgt_bin_desc* @.omp_offloading.descriptor)
+// OPENMP-NEXT:   call void @__tgt_unregister_lib(ptr @.omp_offloading.descriptor)
 // OPENMP-NEXT:   ret void
 // OPENMP-NEXT: }
 
@@ -36,56 +36,56 @@
 // RUN:   -linker-path /usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=CUDA
 
 // CUDA: @.fatbin_image = internal constant [0 x i8] zeroinitializer, section ".nv_fatbin"
-// CUDA-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1180844977, i32 1, i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.fatbin_image, i32 0, i32 0), i8* null }, section ".nvFatBinSegment", align 8
+// CUDA-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1180844977, i32 1, ptr @.fatbin_image, ptr null }, section ".nvFatBinSegment", align 8
 // CUDA-NEXT: @__dummy.cuda_offloading.entry = hidden constant [0 x %__tgt_offload_entry] zeroinitializer, section "cuda_offloading_entries"
-// CUDA-NEXT: @.cuda.binary_handle = internal global i8** null
+// CUDA-NEXT: @.cuda.binary_handle = internal global ptr null
 // CUDA-NEXT: @__start_cuda_offloading_entries = external hidden constant [0 x %__tgt_offload_entry]
 // CUDA-NEXT: @__stop_cuda_offloading_entries = external hidden constant [0 x %__tgt_offload_entry]
-// CUDA-NEXT: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.cuda.fatbin_reg, i8* null }]
+// CUDA-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.cuda.fatbin_reg, ptr null }]
 
 // CUDA: define internal void @.cuda.fatbin_reg() section ".text.startup" {
 // CUDA-NEXT: entry:
-// CUDA-NEXT:   %0 = call i8** @__cudaRegisterFatBinary(i8* bitcast (%fatbin_wrapper* @.fatbin_wrapper to i8*))
-// CUDA-NEXT:   store i8** %0, i8*** @.cuda.binary_handle, align 8
-// CUDA-NEXT:   call void @.cuda.globals_reg(i8** %0)
-// CUDA-NEXT:   call void @__cudaRegisterFatBinaryEnd(i8** %0)
-// CUDA-NEXT:   %1 = call i32 @atexit(void ()* @.cuda.fatbin_unreg)
+// CUDA-NEXT:   %0 = call ptr @__cudaRegisterFatBinary(ptr @.fatbin_wrapper)
+// CUDA-NEXT:   store ptr %0, ptr @.cuda.binary_handle, align 8
+// CUDA-NEXT:   call void @.cuda.globals_reg(ptr %0)
+// CUDA-NEXT:   call void @__cudaRegisterFatBinaryEnd(ptr %0)
+// CUDA-NEXT:   %1 = call i32 @atexit(ptr @.cuda.fatbin_unreg)
 // CUDA-NEXT:   ret void
 // CUDA-NEXT: }
 
 // CUDA: define internal void @.cuda.fatbin_unreg() section ".text.startup" {
 // CUDA-NEXT: entry:
-// CUDA-NEXT:   %0 = load i8**, i8*** @.cuda.binary_handle, align 8
-// CUDA-NEXT:   call void @__cudaUnregisterFatBinary(i8** %0)
+// CUDA-NEXT:   %0 = load ptr, ptr @.cuda.binary_handle, align 8
+// CUDA-NEXT:   call void @__cudaUnregisterFatBinary(ptr %0)
 // CUDA-NEXT:   ret void
 // CUDA-NEXT: }
 
-// CUDA: define internal void @.cuda.globals_reg(i8** %0) section ".text.startup" {
+// CUDA: define internal void @.cuda.globals_reg(ptr %0) section ".text.startup" {
 // CUDA-NEXT: entry:
-// CUDA-NEXT:   br i1 icmp ne ([0 x %__tgt_offload_entry]* @__start_cuda_offloading_entries, [0 x %__tgt_offload_entry]* @__stop_cuda_offloading_entries), label %while.entry, label %while.end
+// CUDA-NEXT:   br i1 icmp ne (ptr @__start_cuda_offloading_entries, ptr @__stop_cuda_offloading_entries), label %while.entry, label %while.end
 
 // CUDA: while.entry:
-// CUDA-NEXT:   %entry1 = phi %__tgt_offload_entry* [ getelementptr inbounds ([0 x %__tgt_offload_entry], [0 x %__tgt_offload_entry]* @__start_cuda_offloading_entries, i64 0, i64 0), %entry ], [ %7, %if.end ]
-// CUDA-NEXT:   %1 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 0
-// CUDA-NEXT:   %addr = load i8*, i8** %1, align 8
-// CUDA-NEXT:   %2 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 1
-// CUDA-NEXT:   %name = load i8*, i8** %2, align 8
-// CUDA-NEXT:   %3 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 2
-// CUDA-NEXT:   %size = load i64, i64* %3, align 4
+// CUDA-NEXT:   %entry1 = phi ptr [ @__start_cuda_offloading_entries, %entry ], [ %7, %if.end ]
+// CUDA-NEXT:   %1 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 0
+// CUDA-NEXT:   %addr = load ptr, ptr %1, align 8
+// CUDA-NEXT:   %2 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 1
+// CUDA-NEXT:   %name = load ptr, ptr %2, align 8
+// CUDA-NEXT:   %3 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 2
+// CUDA-NEXT:   %size = load i64, ptr %3, align 4
 // CUDA-NEXT:   %4 = icmp eq i64 %size, 0
 // CUDA-NEXT:   br i1 %4, label %if.then, label %if.else
 
 // CUDA: if.then:
-// CUDA-NEXT:   %5 = call i32 @__cudaRegisterFunction(i8** %0, i8* %addr, i8* %name, i8* %name, i32 -1, i8* null, i8* null, i8* null, i8* null, i32* null)
+// CUDA-NEXT:   %5 = call i32 @__cudaRegisterFunction(ptr %0, ptr %addr, ptr %name, ptr %name, i32 -1, ptr null, ptr null, ptr null, ptr null, ptr null)
 // CUDA-NEXT:   br label %if.end
 
 // CUDA: if.else:
-// CUDA-NEXT:   %6 = call i32 @__cudaRegisterVar(i8** %0, i8* %addr, i8* %name, i8* %name, i32 0, i64 %size, i32 0, i32 0)
+// CUDA-NEXT:   %6 = call i32 @__cudaRegisterVar(ptr %0, ptr %addr, ptr %name, ptr %name, i32 0, i64 %size, i32 0, i32 0)
 // CUDA-NEXT:   br label %if.end
 
 // CUDA: if.end:
-// CUDA-NEXT:   %7 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 1
-// CUDA-NEXT:   %8 = icmp eq %__tgt_offload_entry* %7, getelementptr inbounds ([0 x %__tgt_offload_entry], [0 x %__tgt_offload_entry]* @__stop_cuda_offloading_entries, i64 0, i64 0)
+// CUDA-NEXT:   %7 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 1
+// CUDA-NEXT:   %8 = icmp eq ptr %7, @__stop_cuda_offloading_entries
 // CUDA-NEXT:   br i1 %8, label %while.end, label %while.entry
 
 // CUDA: while.end:

diff  --git a/llvm/docs/OpaquePointers.rst b/llvm/docs/OpaquePointers.rst
index 8ae2595ae66e7..a576c1325b916 100644
--- a/llvm/docs/OpaquePointers.rst
+++ b/llvm/docs/OpaquePointers.rst
@@ -195,7 +195,7 @@ Transition State
 ================
 
 As of April 2022 both LLVM and Clang have complete support for opaque pointers,
-and opaque pointers are enabled by default in Clang.
+and opaque pointers are enabled by default in LLVM and Clang.
 
 For users of the clang driver interface, it is possible to temporarily restore
 the old default using the ``-DCLANG_ENABLE_OPAQUE_POINTERS=OFF`` cmake option,
@@ -208,8 +208,13 @@ the cc1 interface.
 Usage for LTO can be disabled by passing ``-Wl,-plugin-opt=no-opaque-pointers``
 to the clang driver.
 
+For users of LLVM as a library, opaque pointers can be disabled by calling
+``setOpaquePointers(false)`` on the ``LLVMContext``.
+
+For users of LLVM tools like opt, opaque pointers can be disabled by passing
+``-opaque-pointers=0``.
+
 The next steps for the opaque pointer migration are:
 
 * Migrate Clang/LLVM tests to use opaque pointers.
-* Enable opaque pointers by default in LLVM.
 * Remove support for typed pointers after the LLVM 15 branch has been created.

diff  --git a/llvm/lib/IR/LLVMContextImpl.cpp b/llvm/lib/IR/LLVMContextImpl.cpp
index ed192275aabed..dc44a34ea9101 100644
--- a/llvm/lib/IR/LLVMContextImpl.cpp
+++ b/llvm/lib/IR/LLVMContextImpl.cpp
@@ -36,7 +36,7 @@ using namespace llvm;
 
 static cl::opt<bool>
     OpaquePointersCL("opaque-pointers", cl::desc("Use opaque pointers"),
-                     cl::init(false));
+                     cl::init(true));
 
 LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
     : DiagHandler(std::make_unique<DiagnosticHandler>()),

diff  --git a/llvm/test/Assembler/comment.ll b/llvm/test/Assembler/comment.ll
index edf098948972b..ed58fbd05b204 100644
--- a/llvm/test/Assembler/comment.ll
+++ b/llvm/test/Assembler/comment.ll
@@ -8,7 +8,7 @@
 ; BARE: }
 
 @B = external global i32
-; ANNOT: @B = external global i32   ; [#uses=0 type=i32*]
+; ANNOT: @B = external global i32   ; [#uses=0 type=ptr]
 
 define <4 x i1> @foo(<4 x float> %a, <4 x float> %b) nounwind {
 entry:

diff  --git a/llvm/test/Assembler/invalid-vecreduce.ll b/llvm/test/Assembler/invalid-vecreduce.ll
index 4806ea60883fb..1a2b866dd003f 100644
--- a/llvm/test/Assembler/invalid-vecreduce.ll
+++ b/llvm/test/Assembler/invalid-vecreduce.ll
@@ -1,28 +1,28 @@
 ; RUN: not opt -S < %s 2>&1 | FileCheck %s
 
 ; CHECK: Intrinsic has incorrect return type!
-; CHECK-NEXT: float (double, <2 x double>)* @llvm.vector.reduce.fadd.f32.f64.v2f64
+; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f32.f64.v2f64
 define float @fadd_invalid_scalar_res(double %acc, <2 x double> %in) {
   %res = call float @llvm.vector.reduce.fadd.f32.f64.v2f64(double %acc, <2 x double> %in)
   ret float %res
 }
 
 ; CHECK: Intrinsic has incorrect argument type!
-; CHECK-NEXT: double (float, <2 x double>)* @llvm.vector.reduce.fadd.f64.f32.v2f64
+; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f64.f32.v2f64
 define double @fadd_invalid_scalar_start(float %acc, <2 x double> %in) {
   %res = call double @llvm.vector.reduce.fadd.f64.f32.v2f64(float %acc, <2 x double> %in)
   ret double %res
 }
 
 ; CHECK: Intrinsic has incorrect return type!
-; CHECK-NEXT: <2 x double> (double, <2 x double>)* @llvm.vector.reduce.fadd.v2f64.f64.v2f64
+; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.v2f64.f64.v2f64
 define <2 x double> @fadd_invalid_vector_res(double %acc, <2 x double> %in) {
   %res = call <2 x double> @llvm.vector.reduce.fadd.v2f64.f64.v2f64(double %acc, <2 x double> %in)
   ret <2 x double> %res
 }
 
 ; CHECK: Intrinsic has incorrect argument type!
-; CHECK-NEXT: double (<2 x double>, <2 x double>)* @llvm.vector.reduce.fadd.f64.v2f64.v2f64
+; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f64.v2f64.v2f64
 define double @fadd_invalid_vector_start(<2 x double> %in, <2 x double> %acc) {
   %res = call double @llvm.vector.reduce.fadd.f64.v2f64.v2f64(<2 x double> %acc, <2 x double> %in)
   ret double %res

diff  --git a/llvm/test/Bindings/llvm-c/globals.ll b/llvm/test/Bindings/llvm-c/globals.ll
index a38f08b93ee95..813cf5f32d82d 100644
--- a/llvm/test/Bindings/llvm-c/globals.ll
+++ b/llvm/test/Bindings/llvm-c/globals.ll
@@ -1,7 +1,7 @@
 ; RUN: llvm-as < %s | llvm-c-test --module-list-globals | FileCheck %s
 
 @foo = constant [7 x i8] c"foobar\00", align 1
-;CHECK: GlobalDefinition: foo [7 x i8]*
+;CHECK: GlobalDefinition: foo ptr
 
 @bar = common global i32 0, align 4
-;CHECK: GlobalDefinition: bar i32*
+;CHECK: GlobalDefinition: bar ptr

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir
index e55eb19518a97..fca840d80f2d6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir
@@ -20,7 +20,7 @@ body:             |
   ; CHECK: bb.1:
   ; CHECK:   successors:
   ; CHECK: bb.2:
-  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `i32* undef`, align 8)
+  ; CHECK:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `ptr undef`, align 8)
   ; CHECK:   [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[C]], [[LOAD]]
   ; CHECK:   [[MUL1:%[0-9]+]]:_(s32) = nsw G_MUL [[MUL]], [[C1]]
   ; CHECK:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -45,7 +45,7 @@ body:             |
 
 
   bb.3:
-    %2:_(s32) = G_LOAD %3(p0) :: (load (s32) from `i32* undef`, align 8)
+    %2:_(s32) = G_LOAD %3(p0) :: (load (s32) from `ptr undef`, align 8)
     %5:_(s32) = nsw G_MUL %4, %2
     %7:_(s32) = nsw G_MUL %5, %6
     %9:_(s32) = nsw G_MUL %7, %8

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir b/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
index f2508118912d5..0c97233da57b5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
@@ -34,7 +34,7 @@ body:             |
   ; CHECK-NEXT:   [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
   ; CHECK-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[FCVTHSr]], %subreg.hsub
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
-  ; CHECK-NEXT:   STRHHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `half* undef`)
+  ; CHECK-NEXT:   STRHHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `ptr undef`)
   ; CHECK-NEXT:   B %bb.2
   bb.0:
     successors: %bb.1(0x80000000)
@@ -55,7 +55,7 @@ body:             |
 
     %3:gpr(s16) = G_PHI %1(s16), %bb.1, %5(s16), %bb.2
     %5:fpr(s16) = G_FPTRUNC %8(s32)
-    G_STORE %3(s16), %4(p0) :: (store (s16) into `half* undef`)
+    G_STORE %3(s16), %4(p0) :: (store (s16) into `ptr undef`)
     G_BR %bb.2
 
 ...
@@ -93,7 +93,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:fpr16 = PHI %7, %bb.2, [[COPY2]], %bb.1
   ; CHECK-NEXT:   [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
-  ; CHECK-NEXT:   STRHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `half* undef`)
+  ; CHECK-NEXT:   STRHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `ptr undef`)
   ; CHECK-NEXT:   B %bb.2
   bb.0:
     successors: %bb.1(0x80000000)
@@ -114,7 +114,7 @@ body:             |
 
     %3:fpr(s16) = G_PHI %5(s16), %bb.2, %1(s16), %bb.1
     %5:fpr(s16) = G_FPTRUNC %8(s32)
-    G_STORE %3(s16), %4(p0) :: (store (s16) into `half* undef`)
+    G_STORE %3(s16), %4(p0) :: (store (s16) into `ptr undef`)
     G_BR %bb.2
 
 ...

diff --git a/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir b/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir
index c1e0829cf788b..d1da5e76b536f 100644
--- a/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir
+++ b/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir
@@ -15,8 +15,8 @@
 # CHECK-LABLE: test
 # CHECK: bb.0:
 # CHECK-NEXT: liveins: $x0, $x17, $x18
-# CHECK: renamable $q13_q14_q15 = LD3Threev16b undef renamable $x17 :: (load (s384) from `<16 x i8>* undef`, align 64)
-# CHECK-NEXT: renamable $q23_q24_q25 = LD3Threev16b undef renamable $x18 :: (load (s384) from `<16 x i8>* undef`, align 64)
+# CHECK: renamable $q13_q14_q15 = LD3Threev16b undef renamable $x17 :: (load (s384) from `ptr undef`, align 64)
+# CHECK-NEXT: renamable $q23_q24_q25 = LD3Threev16b undef renamable $x18 :: (load (s384) from `ptr undef`, align 64)
 # CHECK-NEXT: $q16 = EXTv16i8 renamable $q23, renamable $q23, 8
 # CHECK-NEXT: renamable $q20 = EXTv16i8 renamable $q14, renamable $q14, 8
 # CHECK-NEXT: STRQui killed renamable $q20, $sp, 4 :: (store (s128))

diff --git a/llvm/test/CodeGen/AArch64/taildup-inst-dup-loc.mir b/llvm/test/CodeGen/AArch64/taildup-inst-dup-loc.mir
index 275c9a2b96d4b..718fd6bb00cde 100644
--- a/llvm/test/CodeGen/AArch64/taildup-inst-dup-loc.mir
+++ b/llvm/test/CodeGen/AArch64/taildup-inst-dup-loc.mir
@@ -20,7 +20,7 @@ body:             |
   ; CHECK:   B %bb.2
   ; CHECK: bb.1:
   ; CHECK:   successors: %bb.9(0x80000000)
-  ; CHECK:   [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[DEF3]], 0 :: (load (s64) from `i64* undef`)
+  ; CHECK:   [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[DEF3]], 0 :: (load (s64) from `ptr undef`)
   ; CHECK:   B %bb.9
   ; CHECK: bb.2:
   ; CHECK:   successors: %bb.3(0x40000000), %bb.4(0x40000000)
@@ -71,7 +71,7 @@ body:             |
   bb.2:
     successors: %bb.8(0x80000000)
 
-    %8:gpr64 = LDRXui %9, 0 :: (load (s64) from `i64* undef`)
+    %8:gpr64 = LDRXui %9, 0 :: (load (s64) from `ptr undef`)
     B %bb.8
 
   bb.3:

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-divrem.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-divrem.mir
index 19a5baac249d6..367ec09aea711 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-divrem.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-divrem.mir
@@ -303,8 +303,8 @@ body: |
     ; CHECK: %ptr2:_(p1) = G_IMPLICIT_DEF
     ; CHECK: %ptr3:_(p1) = COPY $vgpr2_vgpr3
     ; CHECK: %ptr4:_(p1) = COPY $vgpr4_vgpr5
-    ; CHECK: G_STORE %src1(s32), %ptr1(p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; CHECK: G_STORE %src2(s32), %ptr2(p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; CHECK: G_STORE %src1(s32), %ptr1(p1) :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; CHECK: G_STORE %src2(s32), %ptr2(p1) :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     ; CHECK: %div:_(s32), %rem:_ = G_SDIVREM %src1, %src2
     ; CHECK: G_STORE %div(s32), %ptr3(p1) :: (store (s32), addrspace 1)
     ; CHECK: G_STORE %rem(s32), %ptr4(p1) :: (store (s32), addrspace 1)

diff --git a/llvm/test/CodeGen/AMDGPU/merge-flat-load-store.mir b/llvm/test/CodeGen/AMDGPU/merge-flat-load-store.mir
index 53538810dd2e0..f9801a50dfd74 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-flat-load-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-flat-load-store.mir
@@ -8,7 +8,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_load_dword_2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -25,7 +25,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_load_dword_3
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s96) from `i32* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX3_]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub2
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
@@ -45,7 +45,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_load_dword_4
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
@@ -68,14 +68,14 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_load_dword_5
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
     ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
     ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
     ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 16, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 16, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
     ; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[FLAT_LOAD_DWORD]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@@ -93,14 +93,14 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_load_dword_6
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
     ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
     ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
     ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
@@ -121,7 +121,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_load_dwordx2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i64* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub2_sub3
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -138,7 +138,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_load_dwordx3_with_dwordx1
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i128* undef`, align 8)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 8)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -155,7 +155,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_load_dwordx1_with_dwordx2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `i32* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX3_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub1_sub2
     ; GCN-NEXT: S_NOP 0, implicit [[COPY1]], implicit [[COPY]]
@@ -172,8 +172,8 @@ body:             |
 
     ; GCN-LABEL: name: no_merge_flat_load_dword_agpr_with_vgpr
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:agpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:agpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
     ; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@@ -188,8 +188,8 @@ body:             |
 
     ; GCN-LABEL: name: no_merge_flat_load_dword_disjoint
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 8, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 8, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
     ; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@@ -204,8 +204,8 @@ body:             |
 
     ; GCN-LABEL: name: no_merge_flat_load_dword_overlap
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 3, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 3, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
     ; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@@ -220,8 +220,8 @@ body:             |
 
    ; GCN-LABEL: name: no_merge_flat_load_dword_different_cpol
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
     ; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@@ -239,7 +239,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
-    ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -259,7 +259,7 @@ body:             |
     ; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, killed [[DEF3]], %subreg.sub2
-    ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec, implicit $flat_scr :: (store (s96) into `i32* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -280,7 +280,7 @@ body:             |
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]].sub1, %subreg.sub1, [[DEF1]].sub0, %subreg.sub0
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]].sub2, %subreg.sub2, killed [[REG_SEQUENCE]], %subreg.sub0_sub1
     ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]].sub3, %subreg.sub3, killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2
-    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vreg_128 = IMPLICIT_DEF
     FLAT_STORE_DWORD %0, %1.sub1, 8, 2, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`, align 4)
@@ -304,8 +304,8 @@ body:             |
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:areg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:areg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
     ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:areg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
-    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:agpr_32 = IMPLICIT_DEF
     %2:agpr_32 = IMPLICIT_DEF
@@ -335,9 +335,9 @@ body:             |
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
     ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
-    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 8)
+    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 8)
     ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF5]], %subreg.sub0, [[DEF6]], %subreg.sub1
-    ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -363,7 +363,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1, killed [[DEF2]], %subreg.sub2_sub3
-    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i64* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vreg_64_align2 = IMPLICIT_DEF
     %2:vreg_64_align2 = IMPLICIT_DEF
@@ -381,7 +381,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1_sub2, killed [[DEF2]], %subreg.sub3
-    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i64* undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vreg_96_align2 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -398,8 +398,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
-    ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:agpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -416,8 +416,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
-    ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -434,8 +434,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
-    ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`, align 2)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`, align 2)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -452,8 +452,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
-    ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -470,8 +470,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_128_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     %0:vreg_128_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF

diff --git a/llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir b/llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir
index 28e8db1398efa..3a0c973d12456 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-flat-with-global-load-store.mir
@@ -8,7 +8,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_global_load_dword_2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `float* undef` + 4, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef` + 4, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -25,7 +25,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_flat_load_dword_2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `float addrspace(1)* undef`)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr addrspace(1) undef`)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -42,7 +42,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_flat_load_dword_3
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `float* undef`, align 16)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 16)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX3_]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub2
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
@@ -62,7 +62,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_flat_load_dword_4
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32 addrspace(1)* undef` + 4, align 4, basealign 8)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr addrspace(1) undef` + 4, align 4, basealign 8)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
@@ -85,7 +85,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_global_load_dwordx2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `double* undef`, align 8)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 8)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub2_sub3
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -102,7 +102,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_flat_global_load_dwordx3
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `float* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX4_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_96_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub1_sub2_sub3
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -119,7 +119,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_flat_load_dwordx3
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32 addrspace(1)* undef`, align 4)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr addrspace(1) undef`, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX4_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_96_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub1_sub2_sub3
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -137,8 +137,8 @@ body:             |
     ; GCN-LABEL: name: no_merge_flat_global_load_dword_saddr
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
-    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `float* undef`)
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF1]], [[DEF]].sub0, 4, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef` + 4, align 4, addrspace 1)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF1]], [[DEF]].sub0, 4, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef` + 4, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[COPY]], implicit [[COPY1]]
@@ -158,8 +158,8 @@ body:             |
     ; GCN-LABEL: name: no_merge_global_saddr_flat_load_dword
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF1]], [[DEF]].sub0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef` + 4, align 4)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF1]], [[DEF]].sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef` + 4, align 4)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[COPY]], implicit [[COPY1]]
@@ -180,7 +180,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
-    ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -197,7 +197,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
-    ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32 addrspace(1)* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr addrspace(1) undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -214,7 +214,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1_sub2
-    ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `i32* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vreg_64_align2 = IMPLICIT_DEF
@@ -231,7 +231,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1_sub2_sub3
-    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
+    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vreg_96_align2 = IMPLICIT_DEF
@@ -248,7 +248,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub2, [[DEF2]], %subreg.sub0_sub1
-    ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `i64* undef`, align 8)
+    ; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 8)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vreg_64_align2 = IMPLICIT_DEF
@@ -265,7 +265,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub3, [[DEF2]], %subreg.sub0_sub1_sub2
-    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `<3 x i32>* undef`)
+    ; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vreg_96_align2 = IMPLICIT_DEF
@@ -282,8 +282,8 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF3]], [[DEF1]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF3]], [[DEF1]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:sreg_64_xexec = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -301,8 +301,8 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF2]], [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF3]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF2]], [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF3]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:sreg_64_xexec = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF

diff --git a/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir b/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
index 1d64ae5d20cc0..32d7e4afbaf9d 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
@@ -8,7 +8,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dword_2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `float addrspace(1)* undef` + 4, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef` + 4, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -25,7 +25,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dword_3
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub2
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
@@ -45,7 +45,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dword_4
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
@@ -68,14 +68,14 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dword_5
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
     ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
     ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
     ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 16, 3, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 16, 3, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
     ; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[GLOBAL_LOAD_DWORD]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 3, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@@ -93,14 +93,14 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dword_6
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
     ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
     ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
     ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
@@ -121,7 +121,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dwordx2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `i64 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub2_sub3
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -138,7 +138,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dwordx3_with_dwordx1
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec :: (load (s128) from `i128 addrspace(1)* undef`, align 8, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 8, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -155,7 +155,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dwordx1_with_dwordx2
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub1_sub2
     ; GCN-NEXT: S_NOP 0, implicit [[COPY1]], implicit [[COPY]]
@@ -172,8 +172,8 @@ body:             |
 
     ; GCN-LABEL: name: no_merge_global_load_dword_agpr_with_vgpr
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:agpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:agpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
     ; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@@ -188,8 +188,8 @@ body:             |
 
     ; GCN-LABEL: name: no_merge_global_load_dword_disjoint
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
     ; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@@ -204,8 +204,8 @@ body:             |
 
     ; GCN-LABEL: name: no_merge_global_load_dword_overlap
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 3, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 3, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
     ; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@@ -220,8 +220,8 @@ body:             |
 
     ; GCN-LABEL: name: no_merge_global_load_dword_different_cpol
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 1, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 1, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
     ; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 1, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@@ -237,7 +237,7 @@ body:             |
     ; GCN-LABEL: name: merge_global_load_dword_saddr_2
     ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -256,7 +256,7 @@ body:             |
     ; GCN-LABEL: name: merge_global_load_dword_saddr_3
     ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_SADDR:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3_SADDR [[DEF]], [[DEF1]], 0, 1, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_SADDR:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3_SADDR [[DEF]], [[DEF1]], 0, 1, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_SADDR]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_SADDR]].sub2
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
@@ -278,7 +278,7 @@ body:             |
     ; GCN-LABEL: name: merge_global_load_dword_saddr_4
     ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 2, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 2, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
@@ -303,14 +303,14 @@ body:             |
     ; GCN-LABEL: name: merge_global_load_dword_saddr_6
     ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 4, 3, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 4, 3, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1_sub2
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub3
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
     ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
     ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
     ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 20, 3, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 20, 3, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
     ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
     ; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
@@ -333,7 +333,7 @@ body:             |
     ; GCN-LABEL: name: merge_global_load_dwordx2_saddr
     ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s128) from `i64 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub2_sub3
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -352,8 +352,8 @@ body:             |
     ; GCN-LABEL: name: no_merge_global_load_dword_and_global_load_dword_saddr
     ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
     ; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD_SADDR]]
     %0:sreg_64_xexec = IMPLICIT_DEF
     %1:vreg_64_align2 = IMPLICIT_DEF
@@ -370,8 +370,8 @@ body:             |
     ; GCN-LABEL: name: no_merge_global_load_dword_saddr_different_saddr
     ; GCN: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub0_sub1, [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub2_sub3, [[DEF1]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub0_sub1, [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub2_sub3, [[DEF1]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
     ; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[GLOBAL_LOAD_DWORD_SADDR1]]
     %0:sgpr_128 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
@@ -388,8 +388,8 @@ body:             |
     ; GCN-LABEL: name: no_merge_global_load_dword_saddr_different_vaddr
     ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub1, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub1, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
     ; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[GLOBAL_LOAD_DWORD_SADDR1]]
     %0:sreg_64_xexec = IMPLICIT_DEF
     %1:vreg_64_align2 = IMPLICIT_DEF
@@ -404,7 +404,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dword_2_out_of_order
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `float addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@@ -421,7 +421,7 @@ body:             |
 
     ; GCN-LABEL: name: merge_global_load_dword_3_out_of_order
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
-    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec :: (load (s96) from `float addrspace(1)* undef`, align 16, addrspace 1)
+    ; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 16, addrspace 1)
     ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0_sub1
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub2
     ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
@@ -445,7 +445,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -465,7 +465,7 @@ body:             |
     ; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, killed [[DEF3]], %subreg.sub2
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec :: (store (s96) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec :: (store (s96) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -486,7 +486,7 @@ body:             |
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]].sub1, %subreg.sub1, [[DEF1]].sub0, %subreg.sub0
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]].sub2, %subreg.sub2, killed [[REG_SEQUENCE]], %subreg.sub0_sub1
     ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]].sub3, %subreg.sub3, killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vreg_128 = IMPLICIT_DEF
     GLOBAL_STORE_DWORD %0, %1.sub1, 8, 2, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
@@ -510,8 +510,8 @@ body:             |
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:areg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:areg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
     ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:areg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:agpr_32 = IMPLICIT_DEF
     %2:agpr_32 = IMPLICIT_DEF
@@ -541,9 +541,9 @@ body:             |
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
     ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 8, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 8, addrspace 1)
     ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF5]], %subreg.sub0, [[DEF6]], %subreg.sub1
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -569,7 +569,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1, killed [[DEF2]], %subreg.sub2_sub3
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `i64 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vreg_64_align2 = IMPLICIT_DEF
     %2:vreg_64_align2 = IMPLICIT_DEF
@@ -587,7 +587,7 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1_sub2, killed [[DEF2]], %subreg.sub3
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `i64 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vreg_96_align2 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -604,8 +604,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:agpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -622,8 +622,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -640,8 +640,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, align 2, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, align 2, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -658,8 +658,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:vreg_64_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -676,8 +676,8 @@ body:             |
     ; GCN: [[DEF:%[0-9]+]]:vreg_128_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:vreg_128_align2 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -696,7 +696,7 @@ body:             |
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE]], [[DEF]], 0, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE]], [[DEF]], 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:sreg_64_xexec = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -718,7 +718,7 @@ body:             |
     ; GCN-NEXT: [[DEF4:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX3_SADDR [[DEF1]], killed [[REG_SEQUENCE1]], [[DEF]], 4, 1, implicit $exec :: (store (s96) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX3_SADDR [[DEF1]], killed [[REG_SEQUENCE1]], [[DEF]], 4, 1, implicit $exec :: (store (s96) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:sreg_64_xexec = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -744,7 +744,7 @@ body:             |
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
     ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF5]], %subreg.sub3
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 2, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 2, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:sreg_64_xexec = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -774,9 +774,9 @@ body:             |
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
     ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
     ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF5]], %subreg.sub3
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 3, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 3, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF6]], %subreg.sub0, [[DEF7]], %subreg.sub1
-    ; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE3]], [[DEF]], 20, 3, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE3]], [[DEF]], 20, 3, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
     %0:sreg_64_xexec = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -803,8 +803,8 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF1]], [[DEF3]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF1]], [[DEF3]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:sreg_64_xexec = IMPLICIT_DEF
     %1:vreg_64_align2 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -823,8 +823,8 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub1, [[DEF3]], [[DEF]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub1, [[DEF3]], [[DEF]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:sreg_64_xexec = IMPLICIT_DEF
     %1:vreg_64_align2 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -843,8 +843,8 @@ body:             |
     ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF2]], [[DEF]].sub0_sub1, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF3]], [[DEF]].sub2_sub3, 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF2]], [[DEF]].sub0_sub1, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
+    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF3]], [[DEF]].sub2_sub3, 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %0:sgpr_128 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF

diff  --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
index 290fab625ed51..372ba50a12159 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
@@ -29,7 +29,7 @@ registers:
   - { id: 1, class: gprb }
 # RW-DEFAULT-NOMOVT: constants:
 # RW-DEFAULT-NOMOVT: id: 0
-# RW-DEFAULT-NOMOVT: value: 'i32* @internal_global'
+# RW-DEFAULT-NOMOVT: value: 'ptr @internal_global'
 # RWPI-NOMOVT: constants:
 # RWPI-NOMOVT: id: 0
 # RWPI-NOMOVT: value: 'internal_global(SBREL)'
@@ -63,7 +63,7 @@ registers:
   - { id: 1, class: gprb }
 # RW-DEFAULT-NOMOVT: constants:
 # RW-DEFAULT-NOMOVT: id: 0
-# RW-DEFAULT-NOMOVT: value: 'i32* @external_global'
+# RW-DEFAULT-NOMOVT: value: 'ptr @external_global'
 # RWPI-NOMOVT: constants:
 # RWPI-NOMOVT: id: 0
 # RWPI-NOMOVT: value: 'external_global(SBREL)'
@@ -97,7 +97,7 @@ registers:
   - { id: 1, class: gprb }
 # RO-DEFAULT-NOMOVT: constants:
 # RO-DEFAULT-NOMOVT: id: 0
-# RO-DEFAULT-NOMOVT: value: 'i32* @internal_constant'
+# RO-DEFAULT-NOMOVT: value: 'ptr @internal_constant'
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @internal_constant
@@ -127,7 +127,7 @@ registers:
   - { id: 1, class: gprb }
 # RO-DEFAULT-NOMOVT: constants:
 # RO-DEFAULT-NOMOVT: id: 0
-# RO-DEFAULT-NOMOVT: value: 'i32* @external_constant'
+# RO-DEFAULT-NOMOVT: value: 'ptr @external_constant'
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @external_constant

diff  --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir
index 85f105a58c2dd..e4518db2887fb 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir
@@ -21,7 +21,7 @@ registers:
   - { id: 1, class: gprb }
 # ELF-NOMOVT: constants:
 # ELF-NOMOVT: id: 0
-# ELF-NOMOVT: value: 'i32* @internal_global'
+# ELF-NOMOVT: value: 'ptr @internal_global'
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @internal_global
@@ -51,7 +51,7 @@ registers:
   - { id: 1, class: gprb }
 # ELF-NOMOVT: constants:
 # ELF-NOMOVT: id: 0
-# ELF-NOMOVT: value: 'i32* @external_global'
+# ELF-NOMOVT: value: 'ptr @external_global'
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @external_global

diff  --git a/llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-ropi-rwpi.mir b/llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-ropi-rwpi.mir
index 19e38cf5b8673..df6fb9b94eaf6 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-ropi-rwpi.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-ropi-rwpi.mir
@@ -29,7 +29,7 @@ registers:
   - { id: 1, class: gprb }
 # RW-DEFAULT-NOMOVT: constants:
 # RW-DEFAULT-NOMOVT: id: 0
-# RW-DEFAULT-NOMOVT: value: 'i32* @internal_global'
+# RW-DEFAULT-NOMOVT: value: 'ptr @internal_global'
 # RWPI-NOMOVT: constants:
 # RWPI-NOMOVT: id: 0
 # RWPI-NOMOVT: value: 'internal_global(SBREL)'
@@ -63,7 +63,7 @@ registers:
   - { id: 1, class: gprb }
 # RW-DEFAULT-NOMOVT: constants:
 # RW-DEFAULT-NOMOVT: id: 0
-# RW-DEFAULT-NOMOVT: value: 'i32* @external_global'
+# RW-DEFAULT-NOMOVT: value: 'ptr @external_global'
 # RWPI-NOMOVT: constants:
 # RWPI-NOMOVT: id: 0
 # RWPI-NOMOVT: value: 'external_global(SBREL)'
@@ -97,7 +97,7 @@ registers:
   - { id: 1, class: gprb }
 # RO-DEFAULT-NOMOVT: constants:
 # RO-DEFAULT-NOMOVT: id: 0
-# RO-DEFAULT-NOMOVT: value: 'i32* @internal_constant'
+# RO-DEFAULT-NOMOVT: value: 'ptr @internal_constant'
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @internal_constant
@@ -127,7 +127,7 @@ registers:
   - { id: 1, class: gprb }
 # RO-DEFAULT-NOMOVT: constants:
 # RO-DEFAULT-NOMOVT: id: 0
-# RO-DEFAULT-NOMOVT: value: 'i32* @external_constant'
+# RO-DEFAULT-NOMOVT: value: 'ptr @external_constant'
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @external_constant

diff  --git a/llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-static.mir b/llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-static.mir
index c89db88c8f7a0..64274ed81f602 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-static.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/thumb-select-globals-static.mir
@@ -21,7 +21,7 @@ registers:
   - { id: 1, class: gprb }
 # ELF-NOMOVT: constants:
 # ELF-NOMOVT: id: 0
-# ELF-NOMOVT: value: 'i32* @internal_global'
+# ELF-NOMOVT: value: 'ptr @internal_global'
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @internal_global
@@ -51,7 +51,7 @@ registers:
   - { id: 1, class: gprb }
 # ELF-NOMOVT: constants:
 # ELF-NOMOVT: id: 0
-# ELF-NOMOVT: value: 'i32* @external_global'
+# ELF-NOMOVT: value: 'ptr @external_global'
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @external_global

diff  --git a/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir b/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
index 7030eebffed9d..a6f2154100d55 100644
--- a/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
+++ b/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
@@ -6,23 +6,23 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: aligned_memoperands
     ; CHECK: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
-    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 2)
-    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
-    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 8)
-    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
-    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
-    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
-    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
-    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, basealign 8)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 2)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 8)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, align 2)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, align 2)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, basealign 8)
     %0:_(p0) = IMPLICIT_DEF
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`)
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 2)
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 4) ; redundant
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 8)
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 2)
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 4) ; redundant
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 2) ; printed as "align"
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 4) ; redundant
-    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 8)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 2)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 4) ; redundant
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 8)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, align 2)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, align 4) ; redundant
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 2) ; printed as "align"
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 4) ; redundant
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 8)
 ...

diff  --git a/llvm/test/CodeGen/Mips/cstmaterialization/isel-materialization.ll b/llvm/test/CodeGen/Mips/cstmaterialization/isel-materialization.ll
index cc6a7affcbd4f..b29115a4f7827 100644
--- a/llvm/test/CodeGen/Mips/cstmaterialization/isel-materialization.ll
+++ b/llvm/test/CodeGen/Mips/cstmaterialization/isel-materialization.ll
@@ -21,7 +21,7 @@ entry:
 ; MIPS-DAG: t{{[0-9]+}}: i32 = ADDiu Register:i32 $zero, TargetConstant:i32<1>
 ; MIPS-DAG: t{{[0-9]+}}: i32 = ADDiu Register:i32 $zero, TargetConstant:i32<2048>
 ; MIPS-DAG: t{{[0-9]+}}: i32 = LUi TargetConstant:i32<128>
-; MIPS:     t{{[0-9]+}}: ch,glue = JAL TargetGlobalAddress:i32<void (i32, i32, i32)* @f>
+; MIPS:     t{{[0-9]+}}: ch,glue = JAL TargetGlobalAddress:i32<ptr @f>
 
 ; MIPS:     t[[A:[0-9]+]]: i32 = LUi TargetConstant:i32<2304>
 ; MIPS:     t{{[0-9]+}}: i32 = ORi t[[A]], TargetConstant:i32<2>
@@ -30,7 +30,7 @@ entry:
 ; MM-DAG: t{{[0-9]+}}: i32 = LI16_MM TargetConstant:i32<1>
 ; MM-DAG: t{{[0-9]+}}: i32 = ADDiu_MM Register:i32 $zero, TargetConstant:i32<2048>
 ; MM-DAG: t{{[0-9]+}}: i32 = LUi_MM TargetConstant:i32<128>
-; MM:     t{{[0-9]+}}: ch,glue = JAL_MM TargetGlobalAddress:i32<void (i32, i32, i32)* @f>
+; MM:     t{{[0-9]+}}: ch,glue = JAL_MM TargetGlobalAddress:i32<ptr @f>
 
 ; MM:     t[[A:[0-9]+]]: i32 = LUi_MM TargetConstant:i32<2304>
 ; MM:     t{{[0-9]+}}: i32 = ORi_MM t[[A]], TargetConstant:i32<2>

diff  --git a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
index 5052d3a93e459..d456d2d752c09 100644
--- a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
+++ b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
@@ -577,13 +577,13 @@ define double @fcmp_nnan(double %a, double %y, double %z) {
 ; FP library calls can have fast-math-flags.
 
 ; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'log2_approx:'
-; FMFDEBUG:         ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<double (double)* @log2>
+; FMFDEBUG:         ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<ptr @log2>
 ; FMFDEBUG:         ch,glue = callseq_end t15, TargetConstant:i64<32>, TargetConstant:i64<0>, t15:1
 ; FMFDEBUG:         f64,ch,glue = CopyFromReg t16, Register:f64 $f1, t16:1
 ; FMFDEBUG:       Type-legalized selection DAG: %bb.0 'log2_approx:'
 
 ; GLOBALDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'log2_approx:'
-; GLOBALDEBUG:         ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<double (double)* @log2>
+; GLOBALDEBUG:         ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<ptr @log2>
 ; GLOBALDEBUG:         ch,glue = callseq_end t15, TargetConstant:i64<32>, TargetConstant:i64<0>, t15:1
 ; GLOBALDEBUG:         f64,ch,glue = CopyFromReg t16, Register:f64 $f1, t16:1
 ; GLOBALDEBUG:       Type-legalized selection DAG: %bb.0 'log2_approx:'

diff  --git a/llvm/test/CodeGen/SystemZ/regcoal-undef-lane-4-rm-cp-commuting-def.mir b/llvm/test/CodeGen/SystemZ/regcoal-undef-lane-4-rm-cp-commuting-def.mir
index 0bae4e5ae24a7..177afcd73e822 100644
--- a/llvm/test/CodeGen/SystemZ/regcoal-undef-lane-4-rm-cp-commuting-def.mir
+++ b/llvm/test/CodeGen/SystemZ/regcoal-undef-lane-4-rm-cp-commuting-def.mir
@@ -35,7 +35,7 @@ body:             |
   ; CHECK:   J %bb.4
   ; CHECK: bb.2:
   ; CHECK:   successors:
-  ; CHECK:   STMux %20.subreg_l32, undef %8:addr64bit, 0, $noreg :: (store (s32) into `i32* undef`)
+  ; CHECK:   STMux %20.subreg_l32, undef %8:addr64bit, 0, $noreg :: (store (s32) into `ptr undef`)
   ; CHECK: bb.3:
   ; CHECK:   successors:
   ; CHECK: bb.4:
@@ -84,7 +84,7 @@ body:             |
   bb.2:
     successors:
 
-    STMux killed %4, undef %22:addr64bit, 0, $noreg :: (store (s32) into `i32* undef`)
+    STMux killed %4, undef %22:addr64bit, 0, $noreg :: (store (s32) into `ptr undef`)
 
   bb.3:
     successors:

diff  --git a/llvm/test/CodeGen/X86/bug47278.mir b/llvm/test/CodeGen/X86/bug47278.mir
index 70390cbb04361..0b01c9de19c0b 100644
--- a/llvm/test/CodeGen/X86/bug47278.mir
+++ b/llvm/test/CodeGen/X86/bug47278.mir
@@ -11,9 +11,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: foo
     ; CHECK: renamable $eax = IMPLICIT_DEF
-    ; CHECK: renamable $edx = MOVZX32rm8 renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `i168* undef` + 20, align 4, basealign 16)
-    ; CHECK: dead renamable $ecx = MOV32rm renamable $eax, 1, $noreg, 0, $noreg :: (load (s32) from `i168* undef` + 12, basealign 16)
-    ; CHECK: renamable $al = MOV8rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `i32* undef`, align 4)
+    ; CHECK: renamable $edx = MOVZX32rm8 renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef` + 20, align 4, basealign 16)
+    ; CHECK: dead renamable $ecx = MOV32rm renamable $eax, 1, $noreg, 0, $noreg :: (load (s32) from `ptr undef` + 12, basealign 16)
+    ; CHECK: renamable $al = MOV8rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef`, align 4)
     ; CHECK: dead renamable $ecx = COPY renamable $edx
     ; CHECK: dead renamable $ecx = COPY renamable $edx
     ; CHECK: dead renamable $ecx = COPY renamable $edx
@@ -26,9 +26,9 @@ body:             |
     ; CHECK: dead renamable $eax = SHRD32rrCL renamable $eax, killed renamable $edx, implicit-def dead $eflags, implicit killed $cl
     ; CHECK: RET32
     %0:gr32 = IMPLICIT_DEF
-    %1:gr32 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg :: (load (s8) from `i168* undef` + 20, align 4, basealign 16)
-    %2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load (s32) from `i168* undef` + 12, basealign 16)
-    %3:gr8 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load (s8) from `i32* undef`, align 4)
+    %1:gr32 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef` + 20, align 4, basealign 16)
+    %2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load (s32) from `ptr undef` + 12, basealign 16)
+    %3:gr8 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef`, align 4)
     %4:gr32 = COPY %1
     %5:gr32 = COPY %1
     %6:gr32 = COPY %1

diff  --git a/llvm/test/Instrumentation/AddressSanitizer/no-globals.ll b/llvm/test/Instrumentation/AddressSanitizer/no-globals.ll
index a38230f56a07b..589e6d31b08b8 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/no-globals.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/no-globals.ll
@@ -6,7 +6,7 @@ define void @f() {
 }
 
 ; CHECK-NOT: @llvm.global_dtors
-; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @asan.module_ctor, i8* bitcast (void ()* @asan.module_ctor to i8*) }]
+; CHECK: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @asan.module_ctor, ptr @asan.module_ctor }]
 ; CHECK-NOT: @llvm.global_dtors
 ; CHECK: define internal void @asan.module_ctor() #[[#]] comdat
 ; CHECK-NOT: @llvm.global_dtors

diff  --git a/llvm/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll b/llvm/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll
index 659c22c426119..727075ddbe99d 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/odr-check-ignore.ll
@@ -9,15 +9,15 @@ target triple = "x86_64-unknown-linux-gnu"
 @c = internal global [2 x i32] zeroinitializer, align 4
 @d = unnamed_addr global [2 x i32] zeroinitializer, align 4
 
-; NOALIAS:      @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @a to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.1 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
-; NOALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @b to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.2 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
-; NOALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @c to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.3 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
-; NOALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @d to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.4 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
+; NOALIAS:      @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @a to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.1 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
+; NOALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @b to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.2 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; NOALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @c to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.3 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; NOALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @d to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.4 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
 
-; ALIAS:      @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @0 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.1 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
-; ALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @1 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.2 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
-; ALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @2 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.3 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
-; ALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @3 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.4 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
+; ALIAS:      @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @0 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.1 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
+; ALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @1 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.2 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; ALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @2 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.3 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
+; ALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @3 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.4 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
 ; ALIAS:      @0 = private alias {{.*}} @a
 ; ALIAS-NEXT: @1 = private alias {{.*}} @b
 ; ALIAS-NEXT: @2 = private alias {{.*}} @c

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/arith.ll b/llvm/test/Instrumentation/DataFlowSanitizer/arith.ll
index b9b0ca494a9d9..aa951041d8693 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/arith.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/arith.ll
@@ -7,11 +7,11 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define i8 @add(i8 %a, i8 %b) {
   ; CHECK: @add.dfsan
-  ; CHECK-DAG: %[[#ALABEL:]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[ARGTLSTYPE:\[100 x i64\]]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
-  ; CHECK-DAG: %[[#BLABEL:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
+  ; CHECK-DAG: %[[#ALABEL:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; CHECK-DAG: %[[#BLABEL:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
   ; CHECK: %[[#UNION:]] = or i[[#SBITS]] %[[#ALABEL]], %[[#BLABEL]]
   ; CHECK: %c = add i8 %a, %b
-  ; CHECK: store i[[#SBITS]] %[[#UNION]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; CHECK: store i[[#SBITS]] %[[#UNION]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK: ret i8 %c
   %c = add i8 %a, %b
   ret i8 %c

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
index 0d8bbe6e25348..7978fcb00a5db 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
@@ -30,8 +30,8 @@ i1 %a190, i1 %a191, i1 %a192, i1 %a193, i1 %a194, i1 %a195, i1 %a196, i1 %a197,
 i1 %a200
 ) {
   ; CHECK: @arg_overflow.dfsan
-  ; CHECK: [[A199:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
-  ; CHECK: store i32 [[A199]], i32* @__dfsan_retval_origin_tls, align 4
+  ; CHECK: [[A199:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
+  ; CHECK: store i32 [[A199]], ptr @__dfsan_retval_origin_tls, align 4
 
   %r = add i1 %a199, %a200
   ret i1 %r
@@ -39,12 +39,12 @@ i1 %a200
 
 define i1 @param_overflow(i1 %a) {
   ; CHECK: @param_overflow.dfsan
-  ; CHECK: store i32 %1, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
-  ; CHECK-NEXT: store i[[#SBITS]] %2, i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 398) to i[[#SBITS]]*), align 2
-  ; CHECK-NEXT: store i[[#SBITS]] %2, i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 400) to i[[#SBITS]]*), align 2
+  ; CHECK: store i32 %1, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
+  ; CHECK-NEXT: store i[[#SBITS]] %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 398) to ptr), align 2
+  ; CHECK-NEXT: store i[[#SBITS]] %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 400) to ptr), align 2
   ; CHECK-NEXT: %r = call i1 @arg_overflow.dfsan
-  ; CHECK: %_dfsret_o = load i32, i32* @__dfsan_retval_origin_tls, align 4
-  ; CHECK: store i32 %_dfsret_o, i32* @__dfsan_retval_origin_tls, align 4
+  ; CHECK: %_dfsret_o = load i32, ptr @__dfsan_retval_origin_tls, align 4
+  ; CHECK: store i32 %_dfsret_o, ptr @__dfsan_retval_origin_tls, align 4
 
   %r = call i1 @arg_overflow(
 i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
@@ -76,7 +76,7 @@ declare void @foo(i1 %a)
 
 define void @param_with_zero_shadow() {
   ; CHECK: @param_with_zero_shadow.dfsan
-  ; CHECK-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+  ; CHECK-NEXT: store i[[#SBITS]] 0, ptr @__dfsan_arg_tls, align 2
   ; CHECK-NEXT: call void @foo.dfsan(i1 true)
 
   call void @foo(i1 1)

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll
index 75a49a2159a82..74a3ccdac746b 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll
@@ -9,10 +9,10 @@ target triple = "x86_64-unknown-linux-gnu"
 define i32 @phiop(i32 %a, i32 %b, i1 %c) {
   ; CHECK: @phiop.dfsan
   ; CHECK: entry:
-  ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN:2]]
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
+  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK: br i1 %c, label %next, label %done
   ; CHECK: next:
   ; CHECK: br i1 %c, label %T, label %F
@@ -26,7 +26,7 @@ define i32 @phiop(i32 %a, i32 %b, i1 %c) {
   ; CHECK: br label %done
   ; CHECK: done:
   ; CHECK: [[PO:%.*]] = phi i32 [ [[BAO_T]], %T ], [ [[BAO_F]], %F ], [ [[AO]], %entry ]
-  ; CHECK: store i32 [[PO]], i32* @__dfsan_retval_origin_tls, align 4
+  ; CHECK: store i32 [[PO]], ptr @__dfsan_retval_origin_tls, align 4
 
 entry:
   br i1 %c, label %next, label %done

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll
index 72f512d60583e..40e21cdaa59a5 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll
@@ -10,20 +10,20 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define i8 @select8(i1 %c, i8 %t, i8 %f) {
   ; TRACK_CONTROL_FLOW: @select8.dfsan
-  ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
-  ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
-  ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+  ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
+  ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+  ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
   ; TRACK_CONTROL_FLOW: [[TFO:%.*]] = select i1 %c, i32 [[TO]], i32 [[FO]]
   ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
   ; TRACK_CONTROL_FLOW: [[CTFO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[TFO]]
-  ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], i32* @__dfsan_retval_origin_tls, align 4
+  ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], ptr @__dfsan_retval_origin_tls, align 4
 
   ; NO_TRACK_CONTROL_FLOW: @select8.dfsan
-  ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
-  ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+  ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = select i1 %c, i32 [[TO]], i32 [[FO]]
-  ; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], i32* @__dfsan_retval_origin_tls, align 4
+  ; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = select i1 %c, i8 %t, i8 %f
   ret i8 %a
@@ -31,16 +31,16 @@ define i8 @select8(i1 %c, i8 %t, i8 %f) {
 
 define i8 @select8e(i1 %c, i8 %tf) {
   ; TRACK_CONTROL_FLOW: @select8e.dfsan
-  ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
-  ; TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+  ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
+  ; TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
   ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
   ; TRACK_CONTROL_FLOW: [[CTFO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[TFO]]
-  ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], i32* @__dfsan_retval_origin_tls, align 4
+  ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], ptr @__dfsan_retval_origin_tls, align 4
 
   ; NO_TRACK_CONTROL_FLOW: @select8e.dfsan
-  ; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], i32* @__dfsan_retval_origin_tls, align 4
+  ; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], ptr @__dfsan_retval_origin_tls, align 4
 
 %a = select i1 %c, i8 %tf, i8 %tf
   ret i8 %a
@@ -48,24 +48,24 @@ define i8 @select8e(i1 %c, i8 %tf) {
 
 define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
   ; TRACK_CONTROL_FLOW: @select8v.dfsan
-  ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
-  ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
-  ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align 2
-  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
+  ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
+  ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+  ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
   ; TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i[[#SBITS]] [[FS]], 0
   ; TRACK_CONTROL_FLOW: [[FTO:%.*]] = select i1 [[FS_NE]], i32 [[FO]], i32 [[TO]]
   ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
   ; TRACK_CONTROL_FLOW: [[CFTO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[FTO]]
-  ; TRACK_CONTROL_FLOW: store i32 [[CFTO]], i32* @__dfsan_retval_origin_tls, align 4
+  ; TRACK_CONTROL_FLOW: store i32 [[CFTO]], ptr @__dfsan_retval_origin_tls, align 4
 
   ; NO_TRACK_CONTROL_FLOW: @select8v.dfsan
-  ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
-  ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; NO_TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align 2
+  ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+  ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; NO_TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
   ; NO_TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i[[#SBITS]] [[FS]], 0
   ; NO_TRACK_CONTROL_FLOW: [[FTO:%.*]] = select i1 [[FS_NE]], i32 [[FO]], i32 [[TO]]
-  ; NO_TRACK_CONTROL_FLOW: store i32 [[FTO]], i32* @__dfsan_retval_origin_tls, align 4
+  ; NO_TRACK_CONTROL_FLOW: store i32 [[FTO]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
   ret <4 x i8> %a

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/phi.ll b/llvm/test/Instrumentation/DataFlowSanitizer/phi.ll
index 244cdb3773e9a..98c5b1ba40081 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/phi.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/phi.ll
@@ -6,11 +6,11 @@ target triple = "x86_64-unknown-linux-gnu"
 ; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
 
 define {i32, i32} @test({i32, i32} %a, i1 %c) {
-  ; CHECK: %[[#AL:]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN:2]]
+  ; CHECK: %[[#AL:]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
   ; CHECK: %[[#AL0:]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#AL]], i[[#SBITS]] 0, 0
   ; CHECK: %[[#AL1:]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#AL]], i[[#SBITS]] 0, 1
   ; CHECK: %[[#PL:]] = phi { i[[#SBITS]], i[[#SBITS]] } [ %[[#AL0]], %T ], [ %[[#AL1]], %F ]
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#PL]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
+  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#PL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
 entry:
   br i1 %c, label %T, label %F

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/select.ll b/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
index 44283f9e2a51b..684391a7de3f6 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
@@ -10,22 +10,22 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define i8 @select8(i1 %c, i8 %t, i8 %f) {
   ; TRACK_CF: @select8.dfsan
-  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
-  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
-  ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
   ; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
   ; TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
-  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; TRACK_CF: ret i8 %a
 
   ; NO_TRACK_CF: @select8.dfsan
-  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
-  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
-  ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; NO_TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
   ; NO_TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
-  ; NO_TRACK_CF: store i[[#SBITS]] %[[#R+3]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: store i[[#SBITS]] %[[#R+3]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; NO_TRACK_CF: ret i8 %a
 
   %a = select i1 %c, i8 %t, i8 %f
@@ -34,18 +34,18 @@ define i8 @select8(i1 %c, i8 %t, i8 %f) {
 
 define i8 @select8e(i1 %c, i8 %tf) {
   ; TRACK_CF: @select8e.dfsan
-  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
-  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
   ; TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
-  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; TRACK_CF: ret i8 %a
 
   ; NO_TRACK_CF: @select8e.dfsan
-  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
-  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; NO_TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
-  ; NO_TRACK_CF: store i[[#SBITS]] %[[#R]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: store i[[#SBITS]] %[[#R]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; NO_TRACK_CF: ret i8 %a
 
   %a = select i1 %c, i8 %tf, i8 %tf
@@ -54,22 +54,22 @@ define i8 @select8e(i1 %c, i8 %tf) {
 
 define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
   ; TRACK_CF: @select8v.dfsan
-  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
-  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
-  ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; TRACK_CF: %[[#R+3]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
   ; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
   ; TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
-  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; TRACK_CF: ret <4 x i8> %a
 
   ; NO_TRACK_CF: @select8v.dfsan
-  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
-  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
-  ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; NO_TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
   ; NO_TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
-  ; NO_TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; NO_TRACK_CF: ret <4 x i8> %a
 
   %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll b/llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
index e1f32fc0a7435..05b21c11956ed 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
@@ -9,7 +9,7 @@
 
 define i32 @m() {
   ; CHECK-LABEL: @m.dfsan
-  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i[[#SBITS]] zeroext 0, i[[#SBITS]]* %{{.*}})
+  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i[[#SBITS]] zeroext 0, ptr %{{.*}})
 
 entry:
   %call = call zeroext i16 @dfsan_get_label(i64 signext 56)
@@ -19,7 +19,7 @@ entry:
 
 define i32 @k() {
   ; CHECK-LABEL: @k.dfsan
-  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]]* %{{.*}})
+  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, ptr %{{.*}})
 
 entry:
   %call = call zeroext i16 @k2(i64 signext 56, i64 signext 67)
@@ -29,7 +29,7 @@ entry:
 
 define i32 @k3() {
   ; CHECK-LABEL: @k3.dfsan
-  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]]* %{{.*}})
+  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, ptr %{{.*}})
 
 entry:
   %call = call zeroext i16 @k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89)
@@ -39,17 +39,17 @@ entry:
 
 declare zeroext i16 @dfsan_get_label(i64 signext)
 ; CHECK-LABEL: @"dfsw$dfsan_get_label"
-; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i[[#SBITS]] zeroext %1, i[[#SBITS]]* %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i[[#SBITS]] zeroext %1, ptr %{{.*}})
 
 declare zeroext i16 @k2(i64 signext, i64 signext)
 ; CHECK-LABEL: @"dfsw$k2"
-; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]]* %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, ptr %{{.*}})
 
 declare zeroext i16 @k4(i64 signext, i64 signext, i64 signext, i64 signext)
 ; CHECK-LABEL: @"dfsw$k4"
-; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64  %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]]* %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64  %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, ptr %{{.*}})
 
 
-; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i[[#SBITS]], i[[#SBITS]]*)
-; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
-; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
+; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i[[#SBITS]], ptr)
+; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], ptr)
+; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], ptr)

diff  --git a/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll b/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
index 771e6e92e02da..546530e54e397 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
@@ -3,28 +3,28 @@
 ; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
 ; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
 
-; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint ([0 x i8]* @__start_hwasan_globals to i64), i64 ptrtoint ({ i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint ([0 x i8]* @__stop_hwasan_globals to i64), i64 ptrtoint ({ i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
 
 ; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
 
 ; CHECK: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00," }, align 16
-; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ({ i32, [12 x i8] }* @four.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @four.hwasan.descriptor to i64)) to i32), i32 738197508 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
+; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 738197508 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
 
 ; CHECK: @sixteen.hwasan = private global [16 x i8] zeroinitializer, align 16
-; CHECK: @sixteen.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ([16 x i8]* @sixteen.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @sixteen.hwasan.descriptor to i64)) to i32), i32 754974736 }, section "hwasan_globals", !associated [[SIXTEEN:![0-9]+]]
+; CHECK: @sixteen.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 ptrtoint (ptr @sixteen.hwasan.descriptor to i64)) to i32), i32 754974736 }, section "hwasan_globals", !associated [[SIXTEEN:![0-9]+]]
 
 ; CHECK: @huge.hwasan = private global [16777232 x i8] zeroinitializer, align 16
-; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @huge.hwasan.descriptor to i64)) to i32), i32 788529136 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
-; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 771751968 }, section "hwasan_globals", !associated [[HUGE]]
+; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 788529136 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
+; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 771751968 }, section "hwasan_globals", !associated [[HUGE]]
 
-; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint ({ i32, [12 x i8] }* @four.hwasan to i64), i64 6341068275337658368) to i32*)
-; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint ([16 x i8]* @sixteen.hwasan to i64), i64 6485183463413514240) to [16 x i8]*)
-; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 6629298651489370112) to [16777232 x i8]*)
+; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 6341068275337658368) to ptr)
+; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 6485183463413514240) to ptr)
+; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 6629298651489370112) to ptr)
 
-; CHECK: [[NOTE]] = !{{{{}} i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note}
-; CHECK: [[FOUR]] = !{{{{}} i32, [12 x i8] }* @four.hwasan}
-; CHECK: [[SIXTEEN]] = !{[16 x i8]* @sixteen.hwasan}
-; CHECK: [[HUGE]] = !{[16777232 x i8]* @huge.hwasan}
+; CHECK: [[NOTE]] = !{ptr @hwasan.note}
+; CHECK: [[FOUR]] = !{ptr @four.hwasan}
+; CHECK: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
+; CHECK: [[HUGE]] = !{ptr @huge.hwasan}
 
 source_filename = "foo"
 

diff  --git a/llvm/test/Instrumentation/InstrOrderFile/basic.ll b/llvm/test/Instrumentation/InstrOrderFile/basic.ll
index 5192890ca7ae6..110873ac5eec7 100644
--- a/llvm/test/Instrumentation/InstrOrderFile/basic.ll
+++ b/llvm/test/Instrumentation/InstrOrderFile/basic.ll
@@ -12,13 +12,13 @@ define i32 @_Z1fv() {
 }
 ; CHECK-LABEL: define i32 @_Z1fv
 ; CHECK: order_file_entry
-; CHECK: %[[T1:.+]] = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @bitmap_0, i32 0, i32 0
-; CHECK: store i8 1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @bitmap_0, i32 0, i32 0)
+; CHECK: %[[T1:.+]] = load i8, ptr @bitmap_0
+; CHECK: store i8 1, ptr @bitmap_0
 ; CHECK: %[[T2:.+]] = icmp eq i8 %[[T1]], 0
 ; CHECK: br i1 %[[T2]], label %order_file_set, label
 
 ; CHECK: order_file_set
-; CHECK: %[[T3:.+]] = atomicrmw add i32* @_llvm_order_file_buffer_idx, i32 1 seq_cst
+; CHECK: %[[T3:.+]] = atomicrmw add ptr @_llvm_order_file_buffer_idx, i32 1 seq_cst
 ; CHECK: %[[T5:.+]] = and i32 %[[T3]], 131071
-; CHECK: %[[T4:.+]] = getelementptr [131072 x i64], [131072 x i64]* @_llvm_order_file_buffer, i32 0, i32 %[[T5]]
-; CHECK: store i64 {{.*}}, i64* %[[T4]]
+; CHECK: %[[T4:.+]] = getelementptr [131072 x i64], ptr @_llvm_order_file_buffer, i32 0, i32 %[[T5]]
+; CHECK: store i64 {{.*}}, ptr %[[T4]]

diff  --git a/llvm/test/Instrumentation/JustMyCode/jmc-instrument-elf.ll b/llvm/test/Instrumentation/JustMyCode/jmc-instrument-elf.ll
index e7739364be975..52915709eb891 100644
--- a/llvm/test/Instrumentation/JustMyCode/jmc-instrument-elf.ll
+++ b/llvm/test/Instrumentation/JustMyCode/jmc-instrument-elf.ll
@@ -4,36 +4,36 @@
 ; CHECK: @"__A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".just.my.code", align 1, !dbg !5
 
 ; CHECK: define void @l1() !dbg !12 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @l2() !dbg !16 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w1() !dbg !18 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w2() !dbg !19 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w3() !dbg !21 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w4() !dbg !23 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
-; CHECK: define weak void @__CheckForDebuggerJustMyCode(i8* noundef %0) unnamed_addr {
+; CHECK: define weak void @__CheckForDebuggerJustMyCode(ptr noundef %0) unnamed_addr {
 ; CHECK:   ret void
 ; CHECK: }
 

diff  --git a/llvm/test/Instrumentation/JustMyCode/jmc-instrument-x86.ll b/llvm/test/Instrumentation/JustMyCode/jmc-instrument-x86.ll
index 4f3ec1d64de3a..c884395bea49b 100644
--- a/llvm/test/Instrumentation/JustMyCode/jmc-instrument-x86.ll
+++ b/llvm/test/Instrumentation/JustMyCode/jmc-instrument-x86.ll
@@ -3,18 +3,18 @@
 ; CHECK: $_JustMyCode_Default = comdat any
 
 ; CHECK: @"_A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !0
-; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void (i8*)* @_JustMyCode_Default to i8*)], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @_JustMyCode_Default], section "llvm.metadata"
 
 ; CHECK: define void @w1() #0 !dbg !10 {
-; CHECK:   call x86_fastcallcc void @__CheckForDebuggerJustMyCode(i8* inreg noundef @"_A8764FDD_x@c")
+; CHECK:   call x86_fastcallcc void @__CheckForDebuggerJustMyCode(ptr inreg noundef @"_A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
-; CHECK: define void @_JustMyCode_Default(i8* inreg noundef %0) unnamed_addr comdat {
+; CHECK: define void @_JustMyCode_Default(ptr inreg noundef %0) unnamed_addr comdat {
 ; CHECK:   ret void
 ; CHECK: }
 
-; CHECK: declare x86_fastcallcc void @__CheckForDebuggerJustMyCode(i8* inreg noundef) unnamed_addr
+; CHECK: declare x86_fastcallcc void @__CheckForDebuggerJustMyCode(ptr inreg noundef) unnamed_addr
 
 ; CHECK: !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
 ; CHECK: !1 = distinct !DIGlobalVariable(name: "_A8764FDD_x@c", scope: !2, file: !3, type: !5, isLocal: true, isDefinition: true)

diff  --git a/llvm/test/Instrumentation/JustMyCode/jmc-instrument.ll b/llvm/test/Instrumentation/JustMyCode/jmc-instrument.ll
index e05ff76870fbb..098ff1b2a958a 100644
--- a/llvm/test/Instrumentation/JustMyCode/jmc-instrument.ll
+++ b/llvm/test/Instrumentation/JustMyCode/jmc-instrument.ll
@@ -5,61 +5,61 @@
 ; CHECK: $__JustMyCode_Default = comdat any
 
 ; CHECK: @"__7DF23CF5_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !0
-; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void (i8*)* @__JustMyCode_Default to i8*)], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @__JustMyCode_Default], section "llvm.metadata"
 ; CHECK: @"__A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !5
 ; CHECK: @"__0C712A50_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !9
 ; CHECK: @"__A3605329_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !12
 
 ; CHECK: define void @l1() !dbg !19 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @l2() !dbg !23 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w1() !dbg !25 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w2() !dbg !26 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w3() !dbg !28 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w4() !dbg !30 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w5() !dbg !32 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__0C712A50_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__0C712A50_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w6() !dbg !33 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A3605329_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A3605329_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
 ; CHECK: define void @w7() !dbg !34 {
-; CHECK:   call void @__CheckForDebuggerJustMyCode(i8* noundef @"__0C712A50_x@c")
+; CHECK:   call void @__CheckForDebuggerJustMyCode(ptr noundef @"__0C712A50_x@c")
 ; CHECK:   ret void
 ; CHECK: }
 
-; CHECK: define void @__JustMyCode_Default(i8* noundef %0) unnamed_addr comdat {
+; CHECK: define void @__JustMyCode_Default(ptr noundef %0) unnamed_addr comdat {
 ; CHECK:   ret void
 ; CHECK: }
 
-; CHECK: declare void @__CheckForDebuggerJustMyCode(i8* noundef) unnamed_addr
+; CHECK: declare void @__CheckForDebuggerJustMyCode(ptr noundef) unnamed_addr
 
 ; CHECK: !llvm.linker.options = !{!18}
 

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
index 5bb1f1c9394f3..650b3ccc6a4aa 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
@@ -52,73 +52,73 @@ attributes #1 = { sanitize_memory }
 
 ; CHECK-LABEL: @bar
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 24
 ; CHECK: [[V:%.*]] = zext {{.*}}
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 36
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i32*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 40
 ; CHECK: [[V:%.*]] = sext {{.*}}
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 48
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 160
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 168
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 176
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 184
 ; CHECK: [[V:%.*]] = zext {{.*}}
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 192
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 204
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i32*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 208
 ; CHECK: [[V:%.*]] = sext {{.*}}
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 216
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
-; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
+; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
 ; CHECK: [[S:%.*]] = add i64 [[B]], 224
-; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
+; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
 ; CHECK: store {{.*}} [[M]]
 
 ; CHECK: store {{.*}} 72, {{.*}} %va_arg_overflow_size

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
index d30c6eb6460c6..adb3e208d8553 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll
@@ -29,6 +29,6 @@ entry:
 
 ; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
 ; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
 declare i64 @sum(i64 %n, ...)

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/abs-vector.ll b/llvm/test/Instrumentation/MemorySanitizer/abs-vector.ll
index 47191cbe3f045..fd16dfe54e675 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/abs-vector.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/abs-vector.ll
@@ -8,16 +8,16 @@ target triple = "x86_64-unknown-linux-gnu"
 define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a) local_unnamed_addr #0 {
 ; CHECK-LABEL: @test_mm256_abs_epi8(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK:         call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
 ; CHECK-NEXT:    [[TMP4:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <32 x i8> [[TMP2]] to <4 x i64>
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <32 x i8> [[TMP4]] to <4 x i64>
-; CHECK-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT:   store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT:   store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK:         ret <4 x i64> [[TMP6]]
 ;
 entry:
@@ -30,16 +30,16 @@ entry:
 define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a) local_unnamed_addr #0 {
 ; CHECK-LABEL: @test_mm256_abs_epi16(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK:         call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
 ; CHECK-NEXT:    [[TMP4:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <16 x i16> [[TMP2]] to <4 x i64>
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <16 x i16> [[TMP4]] to <4 x i64>
-; CHECK-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT:   store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT:   store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK:         ret <4 x i64> [[TMP6]]
 ;
 entry:
@@ -52,16 +52,16 @@ entry:
 define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a) local_unnamed_addr #0 {
 ; CHECK-LABEL: @test_mm256_abs_epi32(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK:         call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
 ; CHECK-NEXT:    [[TMP4:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to <4 x i64>
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i32> [[TMP4]] to <4 x i64>
-; CHECK-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT:   store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT:   store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK:         ret <4 x i64> [[TMP6]]
 ;
 entry:
@@ -74,12 +74,12 @@ entry:
 define <4 x double> @test_fabs(<4 x double> %a) local_unnamed_addr #0 {
 ; CHECK-LABEL: @test_fabs(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK:         call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> [[A:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[TMP0]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT:   store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store <4 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
+; ORIGIN-NEXT:   store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK:         ret <4 x double> [[TMP2]]
 ;
 entry:

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/array_types.ll b/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
index f73930abf9467..d9e4eeb7f7cba 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/array_types.ll
@@ -15,11 +15,11 @@ entry:
 }
 
 ; CHECK-LABEL: @InsertValue(
-; CHECK-DAG: [[Sx:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i32*)
-; CHECK-DAG: [[Sy:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i64), i64 8) to i32*)
+; CHECK-DAG: [[Sx:%.*]] = load i32, ptr @__msan_param_tls
+; CHECK-DAG: [[Sy:%.*]] = load i32, ptr {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0
 ; CHECK: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Sy]], 1
-; CHECK: store [2 x i32] [[B]], [2 x i32]* {{.*}}@__msan_retval_tls
+; CHECK: store [2 x i32] [[B]], ptr {{.*}}@__msan_retval_tls
 ; CHECK: ret [2 x i32]
 
 
@@ -31,11 +31,11 @@ entry:
 }
 
 ; CHECK-LABEL: @InsertValueDouble(
-; CHECK-DAG: [[Sx:%.*]] = load i64, i64* getelementptr {{.*}}@__msan_param_tls, i32 0, i32 0
-; CHECK-DAG: [[Sy:%.*]] = load i64, i64* {{.*}}@__msan_param_tls to i64), i64 8) to i64*)
+; CHECK-DAG: [[Sx:%.*]] = load i64, ptr @__msan_param_tls
+; CHECK-DAG: [[Sy:%.*]] = load i64, ptr {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0
 ; CHECK: [[B:%.*]] = insertvalue [2 x i64] [[A]], i64 [[Sy]], 1
-; CHECK: store [2 x i64] [[B]], [2 x i64]* {{.*}}@__msan_retval_tls
+; CHECK: store [2 x i64] [[B]], ptr {{.*}}@__msan_retval_tls
 ; CHECK: ret [2 x double]
 
 
@@ -46,9 +46,9 @@ entry:
 }
 
 ; CHECK-LABEL: @ExtractValue(
-; CHECK: [[Sa:%.*]] = load [2 x i32], [2 x i32]* {{.*}}@__msan_param_tls to [2 x i32]*)
+; CHECK: [[Sa:%.*]] = load [2 x i32], ptr @__msan_param_tls
 ; CHECK: [[Sx:%.*]] = extractvalue [2 x i32] [[Sa]], 1
-; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
+; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
 ; CHECK: ret i32
 
 
@@ -62,9 +62,9 @@ define i32 @ArrayInStruct(%MyStruct %s) sanitize_memory {
 }
 
 ; CHECK-LABEL: @ArrayInStruct(
-; CHECK: [[Ss:%.*]] = load { i32, i32, [3 x i32] }, { i32, i32, [3 x i32] }* {{.*}}@__msan_param_tls to { i32, i32, [3 x i32] }*)
+; CHECK: [[Ss:%.*]] = load { i32, i32, [3 x i32] }, ptr @__msan_param_tls
 ; CHECK: [[Sx:%.*]] = extractvalue { i32, i32, [3 x i32] } [[Ss]], 2, 1
-; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
+; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
 ; CHECK: ret i32
 
 
@@ -74,9 +74,9 @@ define i32 @ArrayOfStructs([3 x { i32, i32 }] %a) sanitize_memory {
 }
 
 ; CHECK-LABEL: @ArrayOfStructs(
-; CHECK: [[Ss:%.*]] = load [3 x { i32, i32 }], [3 x { i32, i32 }]* {{.*}}@__msan_param_tls to [3 x { i32, i32 }]*)
+; CHECK: [[Ss:%.*]] = load [3 x { i32, i32 }], ptr @__msan_param_tls
 ; CHECK: [[Sx:%.*]] = extractvalue [3 x { i32, i32 }] [[Ss]], 2, 1
-; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
+; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
 ; CHECK: ret i32
 
 
@@ -86,7 +86,7 @@ define <8 x i16> @ArrayOfVectors([3 x <8 x i16>] %a) sanitize_memory {
 }
 
 ; CHECK-LABEL: @ArrayOfVectors(
-; CHECK: [[Ss:%.*]] = load [3 x <8 x i16>], [3 x <8 x i16>]* {{.*}}@__msan_param_tls to [3 x <8 x i16>]*)
+; CHECK: [[Ss:%.*]] = load [3 x <8 x i16>], ptr @__msan_param_tls
 ; CHECK: [[Sx:%.*]] = extractvalue [3 x <8 x i16>] [[Ss]], 1
-; CHECK: store <8 x i16> [[Sx]], <8 x i16>* {{.*}}@__msan_retval_tls
+; CHECK: store <8 x i16> [[Sx]], ptr @__msan_retval_tls
 ; CHECK: ret <8 x i16>

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/bmi.ll b/llvm/test/Instrumentation/MemorySanitizer/bmi.ll
index c9c3a39dc11be..327fec0ed702e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/bmi.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/bmi.ll
@@ -22,7 +22,7 @@ entry:
 }
 
 ; CHECK-LABEL: @Test_bzhi_32(
-; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
+; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
 ; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
 ; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
@@ -38,7 +38,7 @@ entry:
 }
 
 ; CHECK-LABEL: @Test_bzhi_64(
-; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
+; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
 ; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
 ; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
@@ -55,7 +55,7 @@ entry:
 }
 
 ; CHECK-LABEL: @Test_bextr_32(
-; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
+; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
 ; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
 ; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
@@ -71,7 +71,7 @@ entry:
 }
 
 ; CHECK-LABEL: @Test_bextr_64(
-; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
+; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
 ; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
 ; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
@@ -88,7 +88,7 @@ entry:
 }
 
 ; CHECK-LABEL: @Test_pdep_32(
-; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
+; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
 ; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
 ; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
@@ -104,7 +104,7 @@ entry:
 }
 
 ; CHECK-LABEL: @Test_pdep_64(
-; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
+; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
 ; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
 ; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
@@ -120,7 +120,7 @@ entry:
 }
 
 ; CHECK-LABEL: @Test_pext_32(
-; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
+; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
 ; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
 ; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
@@ -136,7 +136,7 @@ entry:
 }
 
 ; CHECK-LABEL: @Test_pext_64(
-; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
+; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
 ; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
 ; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
 ; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/clmul.ll b/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
index 4a1429a16e0fc..d9d187fe55f84 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
@@ -16,12 +16,12 @@ entry:
 }
 
 ; CHECK-LABEL: @clmul00
-; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[S0:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
+; CHECK: %[[S1:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
 ; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> poison, <2 x i32> zeroinitializer
 ; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> poison, <2 x i32> zeroinitializer
 ; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
-; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
+; CHECK: store <2 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
 
 define <2 x i64> @clmul10(<2 x i64> %a, <2 x i64> %b) sanitize_memory {
 entry:
@@ -30,12 +30,12 @@ entry:
 }
 
 ; CHECK-LABEL: @clmul10
-; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[S0:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
+; CHECK: %[[S1:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
 ; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> poison, <2 x i32> zeroinitializer
 ; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> poison, <2 x i32> <i32 1, i32 1>
 ; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
-; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
+; CHECK: store <2 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
 
 define <4 x i64> @clmul11_256(<4 x i64> %a, <4 x i64> %b) sanitize_memory {
 entry:
@@ -44,12 +44,12 @@ entry:
 }
 
 ; CHECK-LABEL: @clmul11_256
-; CHECK: %[[S0:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[S1:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[S0:.*]] = load <4 x i64>, ptr {{.*}}@__msan_param_tls
+; CHECK: %[[S1:.*]] = load <4 x i64>, ptr {{.*}}@__msan_param_tls
 ; CHECK: %[[SHUF0:.*]] = shufflevector <4 x i64> %[[S0]], <4 x i64> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
 ; CHECK: %[[SHUF1:.*]] = shufflevector <4 x i64> %[[S1]], <4 x i64> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
 ; CHECK: %[[SRET:.*]] = or <4 x i64> %[[SHUF0]], %[[SHUF1]]
-; CHECK: store <4 x i64> %[[SRET]], <4 x i64>* {{.*}}@__msan_retval_tls
+; CHECK: store <4 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
 
 define <8 x i64> @clmul01_512(<8 x i64> %a, <8 x i64> %b) sanitize_memory {
 entry:
@@ -58,13 +58,13 @@ entry:
 }
 
 ; CHECK-LABEL: @clmul01_512
-; CHECK: %[[S0:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
-; CHECK: %[[S1:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
+; CHECK: %[[S0:.*]] = load <8 x i64>, ptr {{.*}}@__msan_param_tls
+; CHECK: %[[S1:.*]] = load <8 x i64>, ptr {{.*}}@__msan_param_tls
 ; CHECK: %[[SHUF0:.*]] = shufflevector <8 x i64> %[[S0]], <8 x i64> poison, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
 ; CHECK: %[[SHUF1:.*]] = shufflevector <8 x i64> %[[S1]], <8 x i64> poison, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
 ; CHECK: %[[SRET:.*]] = or <8 x i64> %[[SHUF0]], %[[SHUF1]]
 ; ORIGIN: %[[FLAT:.*]] = bitcast <8 x i64> %[[SHUF1]] to i512
 ; ORIGIN: %[[I:.*]] = icmp ne i512 %[[FLAT]], 0
 ; ORIGIN: %[[O:.*]] = select i1 %[[I]],
-; CHECK: store <8 x i64> %[[SRET]], <8 x i64>* {{.*}}@__msan_retval_tls
+; CHECK: store <8 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
 ; ORIGIN: store i32 %[[O]], i32* @__msan_retval_origin_tls

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll b/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
index 8506291a57e45..5ea407b3fda7a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll
@@ -6,18 +6,18 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %b64, <2 x i64> %b128, <4 x i64> %b256, <8 x i64> %b512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory {
 ; CHECK-LABEL: @var_funnel_i64(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i64*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <8 x i64>*), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <8 x i64>*), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <8 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
 ; CHECK-NEXT:    [[TMP14:%.*]] = sext i1 [[TMP13]] to i64
@@ -50,18 +50,18 @@ define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64
 
 define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %b32, <4 x i32> %b128, <8 x i32> %b256, <16 x i32> %b512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory {
 ; CHECK-LABEL: @var_funnel_i32(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i32*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i32*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <16 x i32>*), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <16 x i32>*), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <16 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP3]], 0
 ; CHECK-NEXT:    [[TMP14:%.*]] = sext i1 [[TMP13]] to i32
@@ -94,18 +94,18 @@ define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i3
 
 define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %b16, <8 x i16> %b128, <16 x i16> %b256, <32 x i16> %b512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory {
 ; CHECK-LABEL: @var_funnel_i16(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* bitcast ([100 x i64]* @__msan_param_tls to i16*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i16*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i16*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP9:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <32 x i16>*), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <32 x i16>*), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <32 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i16 [[TMP3]], 0
 ; CHECK-NEXT:    [[TMP14:%.*]] = sext i1 [[TMP13]] to i16
@@ -138,18 +138,18 @@ define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i
 
 define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %b8, <16 x i8> %b128, <32 x i8> %b256, <64 x i8> %b512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory {
 ; CHECK-LABEL: @var_funnel_i8(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i8*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i8*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP9:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <64 x i8>*), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <64 x i8>*), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <64 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i8 [[TMP3]], 0
 ; CHECK-NEXT:    [[TMP14:%.*]] = sext i1 [[TMP13]] to i8
@@ -182,14 +182,14 @@ define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %
 
 define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory {
 ; CHECK-LABEL: @var_rotate_i64(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <8 x i64>*), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <8 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP2]], 0
 ; CHECK-NEXT:    [[TMP10:%.*]] = sext i1 [[TMP9]] to i64
@@ -222,14 +222,14 @@ define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64
 
 define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory {
 ; CHECK-LABEL: @var_rotate_i32(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i32*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <16 x i32>*), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <16 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP2]], 0
 ; CHECK-NEXT:    [[TMP10:%.*]] = sext i1 [[TMP9]] to i32
@@ -262,14 +262,14 @@ define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i3
 
 define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory {
 ; CHECK-LABEL: @var_rotate_i16(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* bitcast ([100 x i64]* @__msan_param_tls to i16*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i16*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <32 x i16>*), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <32 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i16 [[TMP2]], 0
 ; CHECK-NEXT:    [[TMP10:%.*]] = sext i1 [[TMP9]] to i16
@@ -302,14 +302,14 @@ define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i
 
 define void @var_rotate_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory {
 ; CHECK-LABEL: @var_rotate_i8(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i8*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <64 x i8>*), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <64 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i8 [[TMP2]], 0
 ; CHECK-NEXT:    [[TMP10:%.*]] = sext i1 [[TMP9]] to i8

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/missing_origin.ll b/llvm/test/Instrumentation/MemorySanitizer/missing_origin.ll
index 3f010c4f9083f..1c3c3ddd3fcf3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/missing_origin.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/missing_origin.ll
@@ -15,8 +15,8 @@ entry:
 }
 
 ; CHECK-LABEL: @Shuffle(
-; CHECK: [[A:%.*]] = load i32, i32* {{.*}}@__msan_param_origin_tls,
-; CHECK: store i32 [[A]], i32* @__msan_retval_origin_tls
+; CHECK: [[A:%.*]] = load i32, ptr @__msan_param_origin_tls
+; CHECK: store i32 [[A]], ptr @__msan_retval_origin_tls
 ; CHECK: ret <4 x i32>
 
 

diff  --git a/llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll b/llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll
index 3ac9e6d850d4c..dc9731a2cd8bc 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_32.ll
@@ -19,8 +19,8 @@ entry:
 ; CHECK-DAG: declare void @__sanitizer_cov_trace_div4(i32 zeroext)
 ; CHECK-DAG: declare void @__sanitizer_cov_trace_div8(i64)
 ; CHECK-DAG: declare void @__sanitizer_cov_trace_gep(i64)
-; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, i64*)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, ptr)
 ; CHECK-DAG: declare void @__sanitizer_cov_trace_pc()
-; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(i32*)
-; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(i32*, i32*)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(ptr)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(ptr, ptr)
 ; CHECK-NOT: declare

diff  --git a/llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll b/llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll
index 404b4c3807da9..6f51182f2b767 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/cmp-tracing-api-x86_64.ll
@@ -15,8 +15,8 @@ entry:
 ; CHECK-DAG: declare void @__sanitizer_cov_trace_div4(i32 zeroext)
 ; CHECK-DAG: declare void @__sanitizer_cov_trace_div8(i64)
 ; CHECK-DAG: declare void @__sanitizer_cov_trace_gep(i64)
-; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, i64*)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, ptr)
 ; CHECK-DAG: declare void @__sanitizer_cov_trace_pc()
-; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(i32*)
-; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(i32*, i32*)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(ptr)
+; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(ptr, ptr)
 ; CHECK-NOT: declare

diff  --git a/llvm/test/Instrumentation/SanitizerCoverage/coff-comdat.ll b/llvm/test/Instrumentation/SanitizerCoverage/coff-comdat.ll
index 8207f2890b54e..56757a532f973 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/coff-comdat.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/coff-comdat.ll
@@ -26,16 +26,16 @@
 
 ; CHECK: @__sancov_gen_{{.*}} = private global [1 x i8] zeroinitializer, section ".SCOV$CM", comdat($foo), align 1
 
-; CHECK: @__sancov_gen_{{.*}} = private constant [2 x i64*]
-; CHECK-SAME: [i64* bitcast (i32 (i32)* @foo to i64*), i64* inttoptr (i64 1 to i64*)],
+; CHECK: @__sancov_gen_{{.*}} = private constant [2 x ptr]
+; CHECK-SAME: [ptr @foo, ptr inttoptr (i64 1 to ptr)],
 ; CHECK-SAME: section ".SCOVP$M", comdat($foo), align 8
 
 ; Tables for 'bar' should be in the 'bar' comdat.
 
 ; CHECK: @__sancov_gen_{{.*}} = private global [1 x i8] zeroinitializer, section ".SCOV$CM", comdat($bar), align 1
 
-; CHECK: @__sancov_gen_{{.*}} = private constant [2 x i64*]
-; CHECK-SAME: [i64* bitcast (i32 (i32)* @bar to i64*), i64* inttoptr (i64 1 to i64*)],
+; CHECK: @__sancov_gen_{{.*}} = private constant [2 x ptr]
+; CHECK-SAME: [ptr @bar, ptr inttoptr (i64 1 to ptr)],
 ; CHECK-SAME: section ".SCOVP$M", comdat($bar), align 8
 
 ; 'foo' and 'bar' should be in their new comdat groups.

diff  --git a/llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll b/llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll
index a8a36d14bce11..406ee3ff8eb9f 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll
@@ -9,9 +9,9 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "x86_64-unknown-linux-gnu"
 define void @foo() {
 entry:
-; CHECK:  %0 = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize
+; CHECK:  %0 = load i8, ptr @__sancov_gen_, align 1, !nosanitize
 ; CHECK:  %1 = add i8 %0, 1
-; CHECK:  store i8 %1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize
+; CHECK:  store i8 %1, ptr @__sancov_gen_, align 1, !nosanitize
   ret void
 }
-; CHECK: call void @__sanitizer_cov_8bit_counters_init(i8* @__start___sancov_cntrs, i8* @__stop___sancov_cntrs)
+; CHECK: call void @__sanitizer_cov_8bit_counters_init(ptr @__start___sancov_cntrs, ptr @__stop___sancov_cntrs)

diff  --git a/llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll b/llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll
index 5653a34cddc1a..be8b6c8b95ce9 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll
@@ -5,19 +5,19 @@
 ; CHECK:      @__sancov_gen_ = private global [1 x i1] zeroinitializer, section "__sancov_bools", comdat($foo), align 1{{$}}
 ; CHECK:      @__start___sancov_bools = extern_weak hidden global i1
 ; CHECK-NEXT: @__stop___sancov_bools = extern_weak hidden global i1
-; CHECK:      @llvm.used = appending global [1 x i8*] [i8* bitcast (void ()* @sancov.module_ctor_bool_flag to i8*)], section "llvm.metadata"
-; CHECK:      @llvm.compiler.used = appending global [1 x i8*] [i8* bitcast ([1 x i1]* @__sancov_gen_ to i8*)], section "llvm.metadata"
+; CHECK:      @llvm.used = appending global [1 x ptr] [ptr @sancov.module_ctor_bool_flag], section "llvm.metadata"
+; CHECK:      @llvm.compiler.used = appending global [1 x ptr] [ptr @__sancov_gen_], section "llvm.metadata"
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
 define void @foo() {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i1, i1* getelementptr inbounds ([1 x i1], [1 x i1]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize ![[#EMPTY:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i1, ptr @__sancov_gen_, align 1, !nosanitize ![[#EMPTY:]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i1 [[TMP0]], false
 ; CHECK-NEXT:    br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP3:%.*]]
 ; CHECK:       2:
-; CHECK-NEXT:    store i1 true, i1* getelementptr inbounds ([1 x i1], [1 x i1]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize ![[#EMPTY:]]
+; CHECK-NEXT:    store i1 true, ptr @__sancov_gen_, align 1, !nosanitize ![[#EMPTY:]]
 ; CHECK-NEXT:    br label [[TMP3]]
 ; CHECK:       3:
 ; CHECK-NEXT:    ret void
@@ -25,6 +25,6 @@ define void @foo() {
 entry:
   ret void
 }
-; CHECK: call void @__sanitizer_cov_bool_flag_init(i1* @__start___sancov_bools, i1* @__stop___sancov_bools)
+; CHECK: call void @__sanitizer_cov_bool_flag_init(ptr @__start___sancov_bools, ptr @__stop___sancov_bools)
 
 ; CHECK: ![[#EMPTY]] = !{}

diff  --git a/llvm/test/Instrumentation/SanitizerCoverage/switch-tracing.ll b/llvm/test/Instrumentation/SanitizerCoverage/switch-tracing.ll
index 9195f00859251..2412adbbbf907 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/switch-tracing.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/switch-tracing.ll
@@ -8,7 +8,7 @@ define void @foo(i32 %x) {
 entry:
 ; CHECK: __sancov_gen_cov_switch_values = internal global [5 x i64] [i64 3, i64 32, i64 1, i64 101, i64 1001]
 ; CHECK: [[TMP:%[0-9]*]] = zext i32 %x to i64
-; CHECK-NEXT: call void @__sanitizer_cov_trace_switch(i64 [[TMP]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @__sancov_gen_cov_switch_values, i32 0, i32 0))
+; CHECK-NEXT: call void @__sanitizer_cov_trace_switch(i64 [[TMP]], ptr @__sancov_gen_cov_switch_values)
   switch i32 %x, label %sw.epilog [
     i32 1, label %sw.bb
     i32 1001, label %sw.bb.1

diff  --git a/llvm/test/Linker/2003-01-30-LinkerRename.ll b/llvm/test/Linker/2003-01-30-LinkerRename.ll
index 1844e936baf27..4fc3c7b77aac1 100644
--- a/llvm/test/Linker/2003-01-30-LinkerRename.ll
+++ b/llvm/test/Linker/2003-01-30-LinkerRename.ll
@@ -2,7 +2,7 @@
 ; RUN: llvm-as %s -o %t.2.bc
 ; RUN: llvm-link %t.1.bc %t.2.bc -S | FileCheck %s
 
-; CHECK: @bar = global i32 ()* @foo.2
+; CHECK: @bar = global ptr @foo.2
 
 ; CHECK:      define internal i32 @foo.2() {
 ; CHECK-NEXT:   ret i32 7

diff  --git a/llvm/test/Linker/2008-07-06-AliasFnDecl.ll b/llvm/test/Linker/2008-07-06-AliasFnDecl.ll
index 555899a31acbb..ebdc74ac2c0c8 100644
--- a/llvm/test/Linker/2008-07-06-AliasFnDecl.ll
+++ b/llvm/test/Linker/2008-07-06-AliasFnDecl.ll
@@ -3,7 +3,7 @@
 ; RUN: llvm-as %p/2008-07-06-AliasFnDecl2.ll -o %t2.bc
 ; RUN: llvm-link %t1.bc %t2.bc -o %t3.bc
 
- at b = alias void (), void ()* @a
+ at b = alias void (), ptr @a
 
 define void @a() nounwind  {
 entry:

diff  --git a/llvm/test/Linker/2008-07-06-AliasWeakDest.ll b/llvm/test/Linker/2008-07-06-AliasWeakDest.ll
index 8db492363aa4f..98cac6271533f 100644
--- a/llvm/test/Linker/2008-07-06-AliasWeakDest.ll
+++ b/llvm/test/Linker/2008-07-06-AliasWeakDest.ll
@@ -7,9 +7,9 @@
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
 target triple = "i386-pc-linux-gnu"
 
- at sched_clock = alias i64 (), i64 ()* @native_sched_clock
+ at sched_clock = alias i64 (), ptr @native_sched_clock
 
- at foo = alias i32, i32* @realfoo
+ at foo = alias i32, ptr @realfoo
 @realfoo = global i32 0
 
 define i64 @native_sched_clock() nounwind  {

diff  --git a/llvm/test/Linker/AppendingLinkage.ll b/llvm/test/Linker/AppendingLinkage.ll
index a3db5d28cd22d..d5f2a1fdfdbfe 100644
--- a/llvm/test/Linker/AppendingLinkage.ll
+++ b/llvm/test/Linker/AppendingLinkage.ll
@@ -6,11 +6,11 @@
 ; RUN: llvm-link %t.1.bc %t.2.bc -S | FileCheck %s
 ; CHECK: [i32 7, i32 4, i32 8]
 
- at X = appending global [2 x i32] [ i32 7, i32 4 ]		; <[2 x i32]*> [#uses=2]
- at Y = global i32* getelementptr ([2 x i32], [2 x i32]* @X, i64 0, i64 0)		; <i32**> [#uses=0]
+ at X = appending global [2 x i32] [ i32 7, i32 4 ]
+ at Y = global ptr @X
 
 define void @foo(i64 %V) {
-	%Y = getelementptr [2 x i32], [2 x i32]* @X, i64 0, i64 %V		; <i32*> [#uses=0]
+	%Y = getelementptr [2 x i32], ptr @X, i64 0, i64 %V
 	ret void
 }
 

diff  --git a/llvm/test/Linker/Inputs/2003-01-30-LinkerRename.ll b/llvm/test/Linker/Inputs/2003-01-30-LinkerRename.ll
index 5c6b5f567ff82..501672863b75c 100644
--- a/llvm/test/Linker/Inputs/2003-01-30-LinkerRename.ll
+++ b/llvm/test/Linker/Inputs/2003-01-30-LinkerRename.ll
@@ -1,4 +1,4 @@
- at bar = global i32()* @foo
+ at bar = global ptr @foo
 define internal i32 @foo() {
   ret i32 7
 }

diff  --git a/llvm/test/Linker/Inputs/only-needed-debug-metadata.ll b/llvm/test/Linker/Inputs/only-needed-debug-metadata.ll
index 63a02456e0585..4b7f14c92e1fa 100644
--- a/llvm/test/Linker/Inputs/only-needed-debug-metadata.ll
+++ b/llvm/test/Linker/Inputs/only-needed-debug-metadata.ll
@@ -3,7 +3,7 @@
 declare i32 @foo()
 
 define void @bar() !dbg !4 {
-	load i32, i32* @X, !dbg !10
+	load i32, ptr @X, !dbg !10
 	call i32 @foo(), !dbg !11
 	ret void, !dbg !12
 }

diff  --git a/llvm/test/Linker/Inputs/pr26037.ll b/llvm/test/Linker/Inputs/pr26037.ll
index 9d7190ef1e72b..be0b445d36c30 100644
--- a/llvm/test/Linker/Inputs/pr26037.ll
+++ b/llvm/test/Linker/Inputs/pr26037.ll
@@ -1,7 +1,7 @@
 define i32 @main() #0 !dbg !4 {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
+  store i32 0, ptr %retval, align 4
   ret i32 0, !dbg !11
 }
 

diff  --git a/llvm/test/Linker/alias-2.ll b/llvm/test/Linker/alias-2.ll
index 8875aa76455fd..4ee5ead0f2928 100644
--- a/llvm/test/Linker/alias-2.ll
+++ b/llvm/test/Linker/alias-2.ll
@@ -5,7 +5,7 @@
 ; erroneously renamed to A.1 and not linked to the declaration from
 ; the first module
 
- at C = alias void (), void ()* @A
+ at C = alias void (), ptr @A
 
 define void @D() {
   call void @C()
@@ -16,7 +16,7 @@ define void @A() {
   ret void
 }
 
-; CHECK-DAG: @C = alias void (), void ()* @A
+; CHECK-DAG: @C = alias void (), ptr @A
 ; CHECK-DAG: define void @B()
 ; CHECK-DAG:   call void @A()
 ; CHECK-DAG: define void @D()

diff  --git a/llvm/test/Linker/alias-threadlocal.ll b/llvm/test/Linker/alias-threadlocal.ll
index 3e50a62770b0a..a345a802b04ea 100644
--- a/llvm/test/Linker/alias-threadlocal.ll
+++ b/llvm/test/Linker/alias-threadlocal.ll
@@ -4,6 +4,6 @@
 ; Verify that linking GlobalAliases preserves the thread_local attribute
 
 ; CHECK: @tlsvar1 = thread_local global i32 0, align 4
-; CHECK: @tlsvar2 = hidden thread_local alias i32, i32* @tlsvar1
+; CHECK: @tlsvar2 = hidden thread_local alias i32, ptr @tlsvar1
 
 @tlsvar2 = external thread_local global i32, align 4

diff  --git a/llvm/test/Linker/comdat-any.ll b/llvm/test/Linker/comdat-any.ll
index 36935383b0c05..ef6db661c3e55 100644
--- a/llvm/test/Linker/comdat-any.ll
+++ b/llvm/test/Linker/comdat-any.ll
@@ -33,22 +33,22 @@ define void @bar() comdat($foo) {
 $c1 = comdat any
 
 @v1 = weak_odr global i32 42, comdat($c1)
-define weak_odr i32 @f1(i8*) comdat($c1) {
+define weak_odr i32 @f1(ptr) comdat($c1) {
 bb10:
   br label %bb11
 bb11:
   ret i32 42
 }
 
- at r11 = global i32* @v1
- at r12 = global i32 (i8*)* @f1
+ at r11 = global ptr @v1
+ at r12 = global ptr @f1
 
- at a11 = alias i32, i32* @v1
- at a12 = alias i16, bitcast (i32* @v1 to i16*)
+ at a11 = alias i32, ptr @v1
+ at a12 = alias i16, ptr @v1
 
- at a13 = alias i32 (i8*), i32 (i8*)* @f1
- at a14 = alias i16, bitcast (i32 (i8*)* @f1 to i16*)
- at a15 = alias i16, i16* @a14
+ at a13 = alias i32 (ptr), ptr @f1
+ at a14 = alias i16, ptr @f1
+ at a15 = alias i16, ptr @a14
 
 ; CHECK2: $c1 = comdat any
 ; CHECK2: $c2 = comdat any
@@ -57,36 +57,36 @@ bb11:
 
 ; CHECK2-DAG: @v1 = weak_odr global i32 42, comdat($c1)
 
-; CHECK2-DAG: @r11 = global i32* @v1{{$}}
-; CHECK2-DAG: @r12 = global i32 (i8*)* @f1{{$}}
+; CHECK2-DAG: @r11 = global ptr @v1{{$}}
+; CHECK2-DAG: @r12 = global ptr @f1{{$}}
 
-; CHECK2-DAG: @r21 = global i32* @v1{{$}}
-; CHECK2-DAG: @r22 = global i32 (i8*)* @f1{{$}}
+; CHECK2-DAG: @r21 = global ptr @v1{{$}}
+; CHECK2-DAG: @r22 = global ptr @f1{{$}}
 
 ; CHECK2-DAG: @v1.1 = internal global i32 41, comdat($c2)
 
-; CHECK2-DAG: @a11 = alias i32, i32* @v1{{$}}
-; CHECK2-DAG: @a12 = alias i16, bitcast (i32* @v1 to i16*)
+; CHECK2-DAG: @a11 = alias i32, ptr @v1{{$}}
+; CHECK2-DAG: @a12 = alias i16, ptr @v1
 
-; CHECK2-DAG: @a13 = alias i32 (i8*), i32 (i8*)* @f1{{$}}
-; CHECK2-DAG: @a14 = alias i16, bitcast (i32 (i8*)* @f1 to i16*)
+; CHECK2-DAG: @a13 = alias i32 (ptr), ptr @f1{{$}}
+; CHECK2-DAG: @a14 = alias i16, ptr @f1
 
-; CHECK2-DAG: @a21 = alias i32, i32* @v1.1{{$}}
-; CHECK2-DAG: @a22 = alias i16, bitcast (i32* @v1.1 to i16*)
+; CHECK2-DAG: @a21 = alias i32, ptr @v1.1{{$}}
+; CHECK2-DAG: @a22 = alias i16, ptr @v1.1
 
-; CHECK2-DAG: @a23 = alias i32 (i8*), i32 (i8*)* @f1.2{{$}}
-; CHECK2-DAG: @a24 = alias i16, bitcast (i32 (i8*)* @f1.2 to i16*)
+; CHECK2-DAG: @a23 = alias i32 (ptr), ptr @f1.2{{$}}
+; CHECK2-DAG: @a24 = alias i16, ptr @f1.2
 
-; CHECK2:      define weak_odr protected i32 @f1(i8* %0) comdat($c1) {
+; CHECK2:      define weak_odr protected i32 @f1(ptr %0) comdat($c1) {
 ; CHECK2-NEXT: bb10:
 ; CHECK2-NEXT:   br label %bb11{{$}}
 ; CHECK2:      bb11:
 ; CHECK2-NEXT:   ret i32 42
 ; CHECK2-NEXT: }
 
-; CHECK2:      define internal i32 @f1.2(i8* %this) comdat($c2) {
+; CHECK2:      define internal i32 @f1.2(ptr %this) comdat($c2) {
 ; CHECK2-NEXT: bb20:
-; CHECK2-NEXT:   store i8* %this, i8** null
+; CHECK2-NEXT:   store ptr %this, ptr null
 ; CHECK2-NEXT:   br label %bb21
 ; CHECK2:      bb21:
 ; CHECK2-NEXT:   ret i32 41
@@ -99,38 +99,38 @@ $c1 = comdat any
 ; This is only present in this file. The linker will keep $c1 from the first
 ; file and this will be undefined.
 @will_be_undefined = global i32 1, comdat($c1)
- at use = global i32* @will_be_undefined
+ at use = global ptr @will_be_undefined
 
 @v1 = weak_odr global i32 41, comdat($c2)
-define weak_odr protected i32 @f1(i8* %this) comdat($c2) {
+define weak_odr protected i32 @f1(ptr %this) comdat($c2) {
 bb20:
-  store i8* %this, i8** null
+  store ptr %this, ptr null
   br label %bb21
 bb21:
   ret i32 41
 }
 
- at r21 = global i32* @v1
- at r22 = global i32(i8*)* @f1
+ at r21 = global ptr @v1
+ at r22 = global ptr @f1
 
- at a21 = alias i32, i32* @v1
- at a22 = alias i16, bitcast (i32* @v1 to i16*)
+ at a21 = alias i32, ptr @v1
+ at a22 = alias i16, ptr @v1
 
- at a23 = alias i32(i8*), i32(i8*)* @f1
- at a24 = alias i16, bitcast (i32(i8*)* @f1 to i16*)
- at a25 = alias i16, i16* @a24
+ at a23 = alias i32(ptr), ptr @f1
+ at a24 = alias i16, ptr @f1
+ at a25 = alias i16, ptr @a24
 
 ;--- 3.ll
 ; CHECK3: @bar = global i32 0, comdat($a1)
 ; CHECK3: @baz = private global i32 42, comdat($a1)
-; CHECK3: @a1 = internal alias i32, i32* @baz
+; CHECK3: @a1 = internal alias i32, ptr @baz
 $a1 = comdat any
 @bar = global i32 0, comdat($a1)
 
 ;--- 3-aux.ll
 $a1 = comdat any
 @baz = private global i32 42, comdat($a1)
- at a1 = internal alias i32, i32* @baz
-define i32* @abc() {
-  ret i32* @a1
+ at a1 = internal alias i32, ptr @baz
+define ptr @abc() {
+  ret ptr @a1
 }

diff  --git a/llvm/test/Linker/ctors2.ll b/llvm/test/Linker/ctors2.ll
index c02973faf4d07..b316c09377539 100644
--- a/llvm/test/Linker/ctors2.ll
+++ b/llvm/test/Linker/ctors2.ll
@@ -3,5 +3,5 @@
 $foo = comdat any
 @foo = global i8 0, comdat
 
-; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
+; CHECK: @llvm.global_ctors = appending global [0 x { i32, ptr, ptr }] zeroinitializer
 ; CHECK: @foo = global i8 0, comdat

diff  --git a/llvm/test/Linker/ctors3.ll b/llvm/test/Linker/ctors3.ll
index d522df58e8918..052a3c963daf9 100644
--- a/llvm/test/Linker/ctors3.ll
+++ b/llvm/test/Linker/ctors3.ll
@@ -4,5 +4,5 @@ $foo = comdat any
 %t = type { i8 }
 @foo = global %t zeroinitializer, comdat
 
-; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
+; CHECK: @llvm.global_ctors = appending global [0 x { i32, ptr, ptr }] zeroinitializer
 ; CHECK: @foo = global %t zeroinitializer, comdat

diff  --git a/llvm/test/Linker/funcimport.ll b/llvm/test/Linker/funcimport.ll
index 9238fc1900eee..3ca09c26cdab4 100644
--- a/llvm/test/Linker/funcimport.ll
+++ b/llvm/test/Linker/funcimport.ll
@@ -16,7 +16,7 @@
 ; EXPORTSTATIC-DAG: @staticvar.llvm.{{.*}} = hidden global
 ; Eventually @staticconstvar can be exported as a copy and not promoted
 ; EXPORTSTATIC-DAG: @staticconstvar.llvm.0 = hidden unnamed_addr constant
-; EXPORTSTATIC-DAG: @P.llvm.{{.*}} = hidden global void ()* null
+; EXPORTSTATIC-DAG: @P.llvm.{{.*}} = hidden global ptr null
 ; EXPORTSTATIC-DAG: define hidden i32 @staticfunc.llvm.
 ; EXPORTSTATIC-DAG: define hidden void @staticfunc2.llvm.
 
@@ -72,7 +72,7 @@
 ; IMPORTSTATIC-DAG: @staticconstvar.llvm.{{.*}} = external hidden unnamed_addr constant
 ; IMPORTSTATIC-DAG: define available_externally i32 @referencestatics
 ; IMPORTSTATIC-DAG: %call = call i32 @staticfunc.llvm.
-; IMPORTSTATIC-DAG: %0 = load i32, i32* @staticvar.llvm.
+; IMPORTSTATIC-DAG: %0 = load i32, ptr @staticvar.llvm.
 ; IMPORTSTATIC-DAG: declare hidden i32 @staticfunc.llvm.
 
 ; Ensure that imported global (external) function and variable references
@@ -90,9 +90,9 @@
 
 ; Ensure that imported static function pointer correctly promoted and renamed.
 ; RUN: llvm-link %t2.bc -summary-index=%t3.thinlto.bc -import=callfuncptr:%t.bc -S | FileCheck %s --check-prefix=IMPORTFUNCPTR
-; IMPORTFUNCPTR-DAG: @P.llvm.{{.*}} = external hidden global void ()*
+; IMPORTFUNCPTR-DAG: @P.llvm.{{.*}} = external hidden global ptr
 ; IMPORTFUNCPTR-DAG: define available_externally void @callfuncptr
-; IMPORTFUNCPTR-DAG: %0 = load void ()*, void ()** @P.llvm.
+; IMPORTFUNCPTR-DAG: %0 = load ptr, ptr @P.llvm.
 
 ; Ensure that imported weak function reference/definition handled properly.
 ; Imported weak_any definition should be skipped with warning, and imported
@@ -107,11 +107,11 @@
 @staticvar = internal global i32 1, align 4
 @staticconstvar = internal unnamed_addr constant [2 x i32] [i32 10, i32 20], align 4
 @commonvar = common global i32 0, align 4
- at P = internal global void ()* null, align 8
+ at P = internal global ptr null, align 8
 
- at weakalias = weak alias void (...), bitcast (void ()* @globalfunc1 to void (...)*)
- at analias = alias void (...), bitcast (void ()* @globalfunc2 to void (...)*)
- at linkoncealias = alias void (...), bitcast (void ()* @linkoncefunc to void (...)*)
+ at weakalias = weak alias void (...), ptr @globalfunc1
+ at analias = alias void (...), ptr @globalfunc2
+ at linkoncealias = alias void (...), ptr @linkoncefunc
 
 define void @globalfunc1() #0 {
 entry:
@@ -131,14 +131,14 @@ entry:
 define i32 @referencestatics(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
   %call = call i32 @staticfunc()
-  %0 = load i32, i32* @staticvar, align 4
+  %0 = load i32, ptr @staticvar, align 4
   %add = add nsw i32 %call, %0
-  %1 = load i32, i32* %i.addr, align 4
+  %1 = load i32, ptr %i.addr, align 4
   %idxprom = sext i32 %1 to i64
-  %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* @staticconstvar, i64 0, i64 %idxprom
-  %2 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [2 x i32], ptr @staticconstvar, i64 0, i64 %idxprom
+  %2 = load i32, ptr %arrayidx, align 4
   %add1 = add nsw i32 %add, %2
   ret i32 %add1
 }
@@ -146,29 +146,29 @@ entry:
 define i32 @referenceglobals(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
   call void @globalfunc1()
-  %0 = load i32, i32* @globalvar, align 4
+  %0 = load i32, ptr @globalvar, align 4
   ret i32 %0
 }
 
 define i32 @referencecommon(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* @commonvar, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr @commonvar, align 4
   ret i32 %0
 }
 
 define void @setfuncptr() #0 {
 entry:
-  store void ()* @staticfunc2, void ()** @P, align 8
+  store ptr @staticfunc2, ptr @P, align 8
   ret void
 }
 
 define void @callfuncptr() #0 {
 entry:
-  %0 = load void ()*, void ()** @P, align 8
+  %0 = load ptr, ptr @P, align 8
   call void %0()
   ret void
 }

diff  --git a/llvm/test/Linker/ifunc.ll b/llvm/test/Linker/ifunc.ll
index aaf5836a137da..f22e4436cfd99 100644
--- a/llvm/test/Linker/ifunc.ll
+++ b/llvm/test/Linker/ifunc.ll
@@ -3,23 +3,23 @@
 
 ;; Check that ifuncs are linked in properly.
 
-; CHECK-DAG: @foo = ifunc void (), void ()* ()* @foo_resolve
-; CHECK-DAG: define internal void ()* @foo_resolve() {
+; CHECK-DAG: @foo = ifunc void (), ptr @foo_resolve
+; CHECK-DAG: define internal ptr @foo_resolve() {
 
-; CHECK-DAG: @bar = ifunc void (), void ()* ()* @bar_resolve
-; CHECK-DAG: define internal void ()* @bar_resolve() {
+; CHECK-DAG: @bar = ifunc void (), ptr @bar_resolve
+; CHECK-DAG: define internal ptr @bar_resolve() {
 
 ;--- a.ll
 declare void @bar()
 
 ;--- b.ll
- at foo = ifunc void (), void ()* ()* @foo_resolve
- at bar = ifunc void (), void ()* ()* @bar_resolve
+ at foo = ifunc void (), ptr @foo_resolve
+ at bar = ifunc void (), ptr @bar_resolve
 
-define internal void ()* @foo_resolve() {
-  ret void ()* null
+define internal ptr @foo_resolve() {
+  ret ptr null
 }
 
-define internal void ()* @bar_resolve() {
-  ret void ()* null
+define internal ptr @bar_resolve() {
+  ret ptr null
 }

diff  --git a/llvm/test/Linker/wrong-addrspace-gv-declaration.ll b/llvm/test/Linker/wrong-addrspace-gv-declaration.ll
index f7037a71d00c9..b7abca7c0af51 100644
--- a/llvm/test/Linker/wrong-addrspace-gv-declaration.ll
+++ b/llvm/test/Linker/wrong-addrspace-gv-declaration.ll
@@ -8,10 +8,10 @@
 @is_really_as1_gv_other_type = external global i32
 
 ; CHECK-LABEL: @foo(
-; CHECK: %load0 = load volatile i32, i32* addrspacecast (i32 addrspace(1)* @is_really_as1_gv to i32*), align 4
-; CHECK: %load1 = load volatile i32, i32* addrspacecast (i32 addrspace(1)* bitcast (float addrspace(1)* @is_really_as1_gv_other_type to i32 addrspace(1)*) to i32*), align 4
+; CHECK: %load0 = load volatile i32, ptr addrspacecast (ptr addrspace(1) @is_really_as1_gv to ptr), align 4
+; CHECK: %load1 = load volatile i32, ptr addrspacecast (ptr addrspace(1) @is_really_as1_gv_other_type to ptr), align 4
 define void @foo() {
-  %load0 = load volatile i32, i32* @is_really_as1_gv, align 4
-  %load1 = load volatile i32, i32* @is_really_as1_gv_other_type, align 4
+  %load0 = load volatile i32, ptr @is_really_as1_gv, align 4
+  %load1 = load volatile i32, ptr @is_really_as1_gv_other_type, align 4
   ret void
 }

diff  --git a/llvm/test/ThinLTO/X86/Inputs/import-ro-constant-bar.ll b/llvm/test/ThinLTO/X86/Inputs/import-ro-constant-bar.ll
index 636d4027a03b8..daecf7bd09ba4 100644
--- a/llvm/test/ThinLTO/X86/Inputs/import-ro-constant-bar.ll
+++ b/llvm/test/ThinLTO/X86/Inputs/import-ro-constant-bar.ll
@@ -5,6 +5,6 @@ target triple = "x86_64-unknown-linux-gnu"
 @foo = external dso_local local_unnamed_addr constant i32, align 4
 define dso_local i32 @_Z3barv() local_unnamed_addr {
 entry:
-  %0 = load i32, i32* @foo, align 4
+  %0 = load i32, ptr @foo, align 4
   ret i32 %0
 }

diff  --git a/llvm/test/ThinLTO/X86/Inputs/module_asm.ll b/llvm/test/ThinLTO/X86/Inputs/module_asm.ll
index f713310eaef8a..7a1dfa59e4c1e 100644
--- a/llvm/test/ThinLTO/X86/Inputs/module_asm.ll
+++ b/llvm/test/ThinLTO/X86/Inputs/module_asm.ll
@@ -1,7 +1,7 @@
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define i32 @main({ i64, { i64, i8* }* } %unnamed) #0 {
+define i32 @main({ i64, ptr } %unnamed) #0 {
   %1 = call i32 @_simplefunction() #1
   ret i32 %1
 }

diff  --git a/llvm/test/ThinLTO/X86/autoupgrade.ll b/llvm/test/ThinLTO/X86/autoupgrade.ll
index 3d31dd726a12c..0550947f63535 100644
--- a/llvm/test/ThinLTO/X86/autoupgrade.ll
+++ b/llvm/test/ThinLTO/X86/autoupgrade.ll
@@ -10,7 +10,7 @@
 ; RUN:     | llvm-bcanalyzer -dump | FileCheck %s
 
 ; CHECK: <STRTAB_BLOCK
-; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0i8{{.*}}'
+; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0{{.*}}'
 
 ; Check that the summary is able to print the names despite the lack of
 ; string table in the legacy bitcode.

diff  --git a/llvm/test/ThinLTO/X86/deadstrip.ll b/llvm/test/ThinLTO/X86/deadstrip.ll
index 240ac51592864..4e97cc3317fb1 100644
--- a/llvm/test/ThinLTO/X86/deadstrip.ll
+++ b/llvm/test/ThinLTO/X86/deadstrip.ll
@@ -139,7 +139,7 @@ target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-apple-macosx10.11.0"
 
 
- at llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_a, i8* null }]
+ at llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__I_a, ptr null }]
 
 declare void @baz()
 
@@ -183,7 +183,7 @@ define available_externally void @live_available_externally_func() {
 ; alive.
 ; We want to make sure the @linkonceodrfuncwithalias copy in Input/deadstrip.ll
 ; is also scanned when computing reachability.
- at linkonceodralias = linkonce_odr alias void (), void ()* @linkonceodrfuncwithalias
+ at linkonceodralias = linkonce_odr alias void (), ptr @linkonceodrfuncwithalias
 
 define linkonce_odr void @linkonceodrfuncwithalias() {
 entry:

diff  --git a/llvm/test/ThinLTO/X86/dot-dumper2.ll b/llvm/test/ThinLTO/X86/dot-dumper2.ll
index d13a9878d8318..51f6c76ea5d70 100644
--- a/llvm/test/ThinLTO/X86/dot-dumper2.ll
+++ b/llvm/test/ThinLTO/X86/dot-dumper2.ll
@@ -38,6 +38,6 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; Function Attrs: nounwind uwtable
 define i32 @main() local_unnamed_addr {
-  store i32 42, i32* @A, align 4
+  store i32 42, ptr @A, align 4
   ret i32 0
 }

diff  --git a/llvm/test/ThinLTO/X86/funcimport-debug.ll b/llvm/test/ThinLTO/X86/funcimport-debug.ll
index 4e03813b6cf89..574346d438fb7 100644
--- a/llvm/test/ThinLTO/X86/funcimport-debug.ll
+++ b/llvm/test/ThinLTO/X86/funcimport-debug.ll
@@ -27,7 +27,7 @@ target triple = "x86_64-apple-macosx10.11.0"
 define i32 @main() #0 {
 entry:
   call void (...) @foo()
-  %0 = load i32, i32* @baz, align 4
+  %0 = load i32, ptr @baz, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/ThinLTO/X86/globals-import-const-fold.ll b/llvm/test/ThinLTO/X86/globals-import-const-fold.ll
index 663b4ee7ebb44..ec94719abfa1f 100644
--- a/llvm/test/ThinLTO/X86/globals-import-const-fold.ll
+++ b/llvm/test/ThinLTO/X86/globals-import-const-fold.ll
@@ -18,6 +18,6 @@ target triple = "x86_64-pc-linux-gnu"
 @baz = external local_unnamed_addr constant i32, align 4
 
 define i32 @main() local_unnamed_addr {
-  %1 = load i32, i32* @baz, align 4
+  %1 = load i32, ptr @baz, align 4
   ret i32 %1
 }

diff  --git a/llvm/test/ThinLTO/X86/import-dsolocal.ll b/llvm/test/ThinLTO/X86/import-dsolocal.ll
index aea53cfcd0743..f4ff76654fac1 100644
--- a/llvm/test/ThinLTO/X86/import-dsolocal.ll
+++ b/llvm/test/ThinLTO/X86/import-dsolocal.ll
@@ -61,64 +61,64 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
 target triple = "x86_64-unknown-linux-gnu"
 
 @a = dso_local global i32 42, align 4
- at b = dso_local global i32* @a, align 8
+ at b = dso_local global ptr @a, align 8
 
 define dso_local void @extern() {
-  call i32 @extern_aux(i32* @a, i32** @b)
+  call i32 @extern_aux(ptr @a, ptr @b)
   ret void
 }
 
-define dso_local i32 @extern_aux(i32* %a, i32** %b) {
-  %p = load i32*, i32** %b, align 8
-  store i32 33, i32* %p, align 4
-  %v = load i32, i32* %a, align 4
+define dso_local i32 @extern_aux(ptr %a, ptr %b) {
+  %p = load ptr, ptr %b, align 8
+  store i32 33, ptr %p, align 4
+  %v = load i32, ptr %a, align 4
   ret i32 %v
 }
 
 define linkonce dso_local void @linkonce() {
-  call i32 @linkonce_aux(i32* @a, i32** @b)
+  call i32 @linkonce_aux(ptr @a, ptr @b)
   ret void
 }
 
-define linkonce i32 @linkonce_aux(i32* %a, i32** %b) {
-  %p = load i32*, i32** %b, align 8
-  store i32 33, i32* %p, align 4
-  %v = load i32, i32* %a, align 4
+define linkonce i32 @linkonce_aux(ptr %a, ptr %b) {
+  %p = load ptr, ptr %b, align 8
+  store i32 33, ptr %p, align 4
+  %v = load i32, ptr %a, align 4
   ret i32 %v
 }
 
 define linkonce_odr dso_local void @linkonceodr() {
-  call i32 @linkonceodr_aux(i32* @a, i32** @b)
+  call i32 @linkonceodr_aux(ptr @a, ptr @b)
   ret void
 }
 
-define linkonce_odr i32 @linkonceodr_aux(i32* %a, i32** %b) {
-  %p = load i32*, i32** %b, align 8
-  store i32 33, i32* %p, align 4
-  %v = load i32, i32* %a, align 4
+define linkonce_odr i32 @linkonceodr_aux(ptr %a, ptr %b) {
+  %p = load ptr, ptr %b, align 8
+  store i32 33, ptr %p, align 4
+  %v = load i32, ptr %a, align 4
   ret i32 %v
 }
 
 define weak dso_local void @weak() {
-  call i32 @weak_aux(i32* @a, i32** @b)
+  call i32 @weak_aux(ptr @a, ptr @b)
   ret void
 }
 
-define weak i32 @weak_aux(i32* %a, i32** %b) {
-  %p = load i32*, i32** %b, align 8
-  store i32 33, i32* %p, align 4
-  %v = load i32, i32* %a, align 4
+define weak i32 @weak_aux(ptr %a, ptr %b) {
+  %p = load ptr, ptr %b, align 8
+  store i32 33, ptr %p, align 4
+  %v = load i32, ptr %a, align 4
   ret i32 %v
 }
 
 define weak_odr dso_local void @weakodr() {
-  call i32 @weakodr_aux(i32* @a, i32** @b)
+  call i32 @weakodr_aux(ptr @a, ptr @b)
   ret void
 }
 
-define weak_odr i32 @weakodr_aux(i32* %a, i32** %b) {
-  %p = load i32*, i32** %b, align 8
-  store i32 33, i32* %p, align 4
-  %v = load i32, i32* %a, align 4
+define weak_odr i32 @weakodr_aux(ptr %a, ptr %b) {
+  %p = load ptr, ptr %b, align 8
+  store i32 33, ptr %p, align 4
+  %v = load i32, ptr %a, align 4
   ret i32 %v
 }

diff  --git a/llvm/test/ThinLTO/X86/import-ro-constant.ll b/llvm/test/ThinLTO/X86/import-ro-constant.ll
index a3af22f1b34fa..604817b67dea2 100644
--- a/llvm/test/ThinLTO/X86/import-ro-constant.ll
+++ b/llvm/test/ThinLTO/X86/import-ro-constant.ll
@@ -26,7 +26,7 @@ target triple = "x86_64-unknown-linux-gnu"
 @foo = external dso_local local_unnamed_addr constant i32, align 4
 define dso_local i32 @main() local_unnamed_addr {
 entry:
-  %0 = load i32, i32* @foo, align 4
+  %0 = load i32, ptr @foo, align 4
   %call = tail call i32 @_Z3barv()
   %add = add nsw i32 %call, %0
   ret i32 %add

diff  --git a/llvm/test/ThinLTO/X86/index-const-prop-comdat.ll b/llvm/test/ThinLTO/X86/index-const-prop-comdat.ll
index d90bcbacba96e..fe0b87377b5ac 100644
--- a/llvm/test/ThinLTO/X86/index-const-prop-comdat.ll
+++ b/llvm/test/ThinLTO/X86/index-const-prop-comdat.ll
@@ -12,6 +12,6 @@ target triple = "x86_64-unknown-linux-gnu"
 @g = external global i32
 
 define i32 @main() {
-  %v = load i32, i32* @g
+  %v = load i32, ptr @g
   ret i32 %v
 }

diff  --git a/llvm/test/ThinLTO/X86/index-const-prop-full-lto.ll b/llvm/test/ThinLTO/X86/index-const-prop-full-lto.ll
index 0b9412a576f61..ccd9f33a0c310 100644
--- a/llvm/test/ThinLTO/X86/index-const-prop-full-lto.ll
+++ b/llvm/test/ThinLTO/X86/index-const-prop-full-lto.ll
@@ -18,7 +18,7 @@ declare i32 @foo()
 
 define i32 @main() {
   %v = call i32 @foo()
-  %v2 = load i32, i32* @g
+  %v2 = load i32, ptr @g
   %v3 = add i32 %v, %v2
   ret i32 %v3
 }

diff  --git a/llvm/test/ThinLTO/X86/index-const-prop-ldst.ll b/llvm/test/ThinLTO/X86/index-const-prop-ldst.ll
index 7c0f8229a7d09..ae68275d1f9f6 100644
--- a/llvm/test/ThinLTO/X86/index-const-prop-ldst.ll
+++ b/llvm/test/ThinLTO/X86/index-const-prop-ldst.ll
@@ -13,9 +13,9 @@ target triple = "x86_64-unknown-linux-gnu"
 @g = external global i32
 
 define i32 @main() {
-  %v = load i32, i32* @g
+  %v = load i32, ptr @g
   %q = add i32 %v, 1
-  store i32 %q, i32* @g
+  store i32 %q, ptr @g
   
   ret i32 %v
 }

diff  --git a/llvm/test/ThinLTO/X86/personality-local.ll b/llvm/test/ThinLTO/X86/personality-local.ll
index af88d2922a4fb..56878962b4d4d 100644
--- a/llvm/test/ThinLTO/X86/personality-local.ll
+++ b/llvm/test/ThinLTO/X86/personality-local.ll
@@ -23,11 +23,11 @@ target triple = "x86_64-pc-linux-gnu"
 
 declare void @foo()
 
-define void @bar() personality i32 (i32, i32, i64, i8*, i8*)* @personality_routine {
+define void @bar() personality ptr @personality_routine {
  ret void
 }
 
-define internal i32 @personality_routine(i32, i32, i64, i8*, i8*) {
+define internal i32 @personality_routine(i32, i32, i64, ptr, ptr) {
   call void @foo()
   ret i32 0
 }

diff  --git a/llvm/test/ThinLTO/X86/referenced_by_constant.ll b/llvm/test/ThinLTO/X86/referenced_by_constant.ll
index e097cd5161f0f..a336f6d13e65c 100644
--- a/llvm/test/ThinLTO/X86/referenced_by_constant.ll
+++ b/llvm/test/ThinLTO/X86/referenced_by_constant.ll
@@ -8,8 +8,8 @@
 ; can make a local copy of someglobal and someglobal2 because they are both
 ; 'unnamed_addr' constants. This should eventually be done as well.
 ; RUN: llvm-lto -thinlto-action=import -import-constants-with-refs %t.bc -thinlto-index=%t3.bc -o - | llvm-dis -o -   | FileCheck %s --check-prefix=IMPORT
-; IMPORT: @someglobal.llvm.0 = available_externally hidden unnamed_addr constant i8* bitcast (void ()* @referencedbyglobal to i8*)
-; IMPORT: @someglobal2.llvm.0 = available_externally hidden unnamed_addr constant i8* bitcast (void ()* @localreferencedbyglobal.llvm.0 to i8*)
+; IMPORT: @someglobal.llvm.0 = available_externally hidden unnamed_addr constant ptr @referencedbyglobal
+; IMPORT: @someglobal2.llvm.0 = available_externally hidden unnamed_addr constant ptr @localreferencedbyglobal.llvm.0
 ; IMPORT: define available_externally void @bar()
 
 ; Check the export side: we currently only export bar(), which causes

diff  --git a/llvm/test/ThinLTO/X86/weak_globals_import.ll b/llvm/test/ThinLTO/X86/weak_globals_import.ll
index 90f2f831e6ccc..39d5f76ce8b71 100644
--- a/llvm/test/ThinLTO/X86/weak_globals_import.ll
+++ b/llvm/test/ThinLTO/X86/weak_globals_import.ll
@@ -22,7 +22,7 @@ target triple = "x86_64-unknown-linux-gnu"
 @G = weak dso_local local_unnamed_addr global i32 0, align 4
 
 define dso_local i32 @main() local_unnamed_addr {
-  %1 = load i32, i32* @G, align 4
+  %1 = load i32, ptr @G, align 4
   ret i32 %1
 }
 

diff  --git a/llvm/test/Transforms/BlockExtractor/extract-blocks-with-groups.ll b/llvm/test/Transforms/BlockExtractor/extract-blocks-with-groups.ll
index d23c3f40d7c05..1439074664d17 100644
--- a/llvm/test/Transforms/BlockExtractor/extract-blocks-with-groups.ll
+++ b/llvm/test/Transforms/BlockExtractor/extract-blocks-with-groups.ll
@@ -14,8 +14,8 @@
 ;
 ; The if-then-else blocks should be in just one function.
 ; CHECK: [[FOO_DIAMOND_LABEL]]:
-; CHECK: call void [[FOO_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, i32* [[RES_VAL_ADDR]])
-; CHECK-NEXT: [[RES_VAL:%[^ ]*]] = load i32, i32* [[RES_VAL_ADDR]]
+; CHECK: call void [[FOO_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, ptr [[RES_VAL_ADDR]])
+; CHECK-NEXT: [[RES_VAL:%[^ ]*]] = load i32, ptr [[RES_VAL_ADDR]]
 ; Then it should directly jump to end.
 ; CHECK: br label %[[FOO_END_LABEL:.*$]]
 ;
@@ -61,7 +61,7 @@ ret1:
 ; CHECK: br i1 %or.cond, label %bb9, label %[[BAR_DIAMOND_LABEL:.*$]]
 ;
 ; CHECK: [[BAR_DIAMOND_LABEL]]:
-; CHECK: [[CMP:%[^ ]*]] = call i1 [[BAR_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, i32*
+; CHECK: [[CMP:%[^ ]*]] = call i1 [[BAR_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, ptr
 ; CHECK: br i1 [[CMP]], label %bb26, label %bb30
 define i32 @bar(i32 %arg, i32 %arg1) {
 bb:

diff  --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAttributes.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAttributes.ll
index 3ce0520fc456d..ef9779c5f4735 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAttributes.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAttributes.ll
@@ -66,8 +66,8 @@ entry:
   ret i32 %c3
 }
 
-; CHECK: define internal void @callee_writeonly.1.if.then(i32 %v, i32* %sub.out) [[FN_ATTRS0:#[0-9]+]]
-; CHECK: define internal void @callee_most.2.if.then(i32 %v, i32* %sub.out)  [[FN_ATTRS:#[0-9]+]]
+; CHECK: define internal void @callee_writeonly.1.if.then(i32 %v, ptr %sub.out) [[FN_ATTRS0:#[0-9]+]]
+; CHECK: define internal void @callee_most.2.if.then(i32 %v, ptr %sub.out)  [[FN_ATTRS:#[0-9]+]]
 
 ; attributes to preserve
 attributes #0 = {

diff  --git a/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
index c68a070a41dd6..cbcb0afca5ecd 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
@@ -24,7 +24,7 @@ if.end:                                           ; preds = %if.then, %entry
 ; CHECK-LABEL: @caller
 ; CHECK: codeRepl.i:
 ; CHECK-NOT: br label
-; CHECK: call void @callee.2.if.then(i32 %v, i32* %mul.loc.i), !dbg ![[DBG2:[0-9]+]]
+; CHECK: call void @callee.2.if.then(i32 %v, ptr %mul.loc.i), !dbg ![[DBG2:[0-9]+]]
 define i32 @caller(i32 %v) !dbg !8 {
 entry:
   %call = call i32 @callee(i32 %v), !dbg !14
@@ -55,7 +55,7 @@ if.end:
 ; CHECK-LABEL: @caller2
 ; CHECK: codeRepl.i:
 ; CHECK-NOT: br label
-; CHECK: call void @callee2.1.if.then(i32 %v, i32* %sub.loc.i), !dbg ![[DBG4:[0-9]+]]
+; CHECK: call void @callee2.1.if.then(i32 %v, ptr %sub.loc.i), !dbg ![[DBG4:[0-9]+]]
 define i32 @caller2(i32 %v) !dbg !21 {
 entry:
   %call = call i32 @callee2(i32 %v), !dbg !22

diff  --git a/llvm/test/Transforms/CodeExtractor/PartialInlineVarArgsDebug.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineVarArgsDebug.ll
index d19de842b61ee..151efacae86a4 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineVarArgsDebug.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineVarArgsDebug.ll
@@ -20,7 +20,7 @@ if.end:                                           ; preds = %if.then, %entry
 ; CHECK-LABEL: @caller
 ; CHECK: codeRepl.i:
 ; CHECK-NOT: br label
-; CHECK: call void (i32, i32*, ...) @callee.1.if.then(i32 %v, i32* %mul.loc.i, i32 99), !dbg ![[DBG2:[0-9]+]]
+; CHECK: call void (i32, ptr, ...) @callee.1.if.then(i32 %v, ptr %mul.loc.i, i32 99), !dbg ![[DBG2:[0-9]+]]
 define i32 @caller(i32 %v) !dbg !8 {
 entry:
   %call = call i32 (i32, ...) @callee(i32 %v, i32 99), !dbg !14

diff  --git a/llvm/test/Transforms/FunctionImport/Inputs/funcimport.ll b/llvm/test/Transforms/FunctionImport/Inputs/funcimport.ll
index c66f8c47365ba..07a7f99e4626a 100644
--- a/llvm/test/Transforms/FunctionImport/Inputs/funcimport.ll
+++ b/llvm/test/Transforms/FunctionImport/Inputs/funcimport.ll
@@ -5,11 +5,11 @@ target triple = "x86_64-apple-macosx10.11.0"
 @staticvar = internal global i32 1, align 4
 @staticconstvar = internal unnamed_addr constant [2 x i32] [i32 10, i32 20], align 4
 @commonvar = common global i32 0, align 4
- at P = internal global void ()* null, align 8
+ at P = internal global ptr null, align 8
 
- at weakalias = weak alias void (...), bitcast (void ()* @globalfunc1 to void (...)*)
- at analias = alias void (...), bitcast (void ()* @globalfunc2 to void (...)*)
- at linkoncealias = alias void (...), bitcast (void ()* @linkoncefunc to void (...)*)
+ at weakalias = weak alias void (...), ptr @globalfunc1
+ at analias = alias void (...), ptr @globalfunc2
+ at linkoncealias = alias void (...), ptr @linkoncefunc
 
 define void @globalfunc1() #0 {
 entry:
@@ -31,14 +31,14 @@ entry:
 define i32 @referencestatics(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
   %call = call i32 @staticfunc()
-  %0 = load i32, i32* @staticvar, align 4
+  %0 = load i32, ptr @staticvar, align 4
   %add = add nsw i32 %call, %0
-  %1 = load i32, i32* %i.addr, align 4
+  %1 = load i32, ptr %i.addr, align 4
   %idxprom = sext i32 %1 to i64
-  %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* @staticconstvar, i64 0, i64 %idxprom
-  %2 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [2 x i32], ptr @staticconstvar, i64 0, i64 %idxprom
+  %2 = load i32, ptr %arrayidx, align 4
   %add1 = add nsw i32 %add, %2
   ret i32 %add1
 }
@@ -46,29 +46,29 @@ entry:
 define i32 @referenceglobals(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
   call void @globalfunc1()
-  %0 = load i32, i32* @globalvar, align 4
+  %0 = load i32, ptr @globalvar, align 4
   ret i32 %0
 }
 
 define i32 @referencecommon(i32 %i) #0 {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* @commonvar, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr @commonvar, align 4
   ret i32 %0
 }
 
 define void @setfuncptr() #0 {
 entry:
-  store void ()* @staticfunc2, void ()** @P, align 8
+  store ptr @staticfunc2, ptr @P, align 8
   ret void
 }
 
 define void @callfuncptr() #0 {
 entry:
-  %0 = load void ()*, void ()** @P, align 8
+  %0 = load ptr, ptr @P, align 8
   call void %0()
   ret void
 }
@@ -92,7 +92,7 @@ entry:
 declare i32 @__gxx_personality_v0(...)
 
 ; Add enough instructions to prevent import with inst limit of 5
-define internal void @funcwithpersonality() #2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define internal void @funcwithpersonality() #2 personality ptr @__gxx_personality_v0 {
 entry:
   call void @globalfunc2()
   call void @globalfunc2()
@@ -159,10 +159,9 @@ define void @variadic_no_va_start(...) {
 ; Variadic function with va_start should not be imported because inliner
 ; doesn't handle it.
 define void @variadic_va_start(...) {
-    %ap = alloca i8*, align 8
-    %ap.0 = bitcast i8** %ap to i8*
-    call void @llvm.va_start(i8* %ap.0)
+    %ap = alloca ptr, align 8
+    call void @llvm.va_start(ptr %ap)
     ret void
 }
 
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind

diff  --git a/llvm/test/Transforms/FunctionImport/funcimport.ll b/llvm/test/Transforms/FunctionImport/funcimport.ll
index 02e355ce1eb4c..610a5ba7eb365 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport.ll
@@ -91,9 +91,9 @@ declare void @callfuncptr(...) #1
 
 ; Ensure that all uses of local variable @P which has used in setfuncptr
 ; and callfuncptr are to the same promoted/renamed global.
-; CHECK-DAG: @P.llvm.{{.*}} = available_externally hidden global void ()* null
-; CHECK-DAG: %0 = load void ()*, void ()** @P.llvm.
-; CHECK-DAG: store void ()* @staticfunc2.llvm.{{.*}}, void ()** @P.llvm.
+; CHECK-DAG: @P.llvm.{{.*}} = available_externally hidden global ptr null
+; CHECK-DAG: %0 = load ptr, ptr @P.llvm.
+; CHECK-DAG: store ptr @staticfunc2.llvm.{{.*}}, ptr @P.llvm.
 
 ; Ensure that @referencelargelinkonce definition is pulled in, but later we
 ; also check that the linkonceodr function is not.
@@ -110,7 +110,7 @@ declare void @weakfunc(...) #1
 declare void @linkoncefunc2(...) #1
 
 ; INSTLIMDEF-DAG: Import funcwithpersonality
-; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !thinlto_src_module !0 {
+; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality ptr @__gxx_personality_v0 !thinlto_src_module !0 {
 ; INSTLIM5-DAG: declare hidden void @funcwithpersonality.llvm.{{.*}}()
 
 ; We can import variadic functions without a va_start, since the inliner

diff  --git a/llvm/test/Transforms/FunctionImport/funcimport_alias.ll b/llvm/test/Transforms/FunctionImport/funcimport_alias.ll
index 7868e08d32fd3..b8ce66c41d8c5 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport_alias.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport_alias.ll
@@ -14,7 +14,7 @@ entry:
   ret i32 0
 }
 
- at analias = alias void (), void ()* @globalfunc
+ at analias = alias void (), ptr @globalfunc
 
 define void @globalfunc() #0 {
 entry:

diff  --git a/llvm/test/Transforms/GCOVProfiling/atomic-counter.ll b/llvm/test/Transforms/GCOVProfiling/atomic-counter.ll
index c6eb86ef92ce2..f5c5978edf5b9 100644
--- a/llvm/test/Transforms/GCOVProfiling/atomic-counter.ll
+++ b/llvm/test/Transforms/GCOVProfiling/atomic-counter.ll
@@ -4,7 +4,7 @@
 
 ; CHECK-LABEL: void @empty()
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    %0 = atomicrmw add i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), i64 1 monotonic, align 8, !dbg [[DBG:![0-9]+]]
+; CHECK-NEXT:    %0 = atomicrmw add ptr @__llvm_gcov_ctr, i64 1 monotonic, align 8, !dbg [[DBG:![0-9]+]]
 ; CHECK-NEXT:    ret void, !dbg [[DBG]]
 
 define dso_local void @empty() !dbg !5 {

diff  --git a/llvm/test/Transforms/GCOVProfiling/function-numbering.ll b/llvm/test/Transforms/GCOVProfiling/function-numbering.ll
index 615a432b6b445..f59f88562ac51 100644
--- a/llvm/test/Transforms/GCOVProfiling/function-numbering.ll
+++ b/llvm/test/Transforms/GCOVProfiling/function-numbering.ll
@@ -22,43 +22,43 @@ target triple = "x86_64-apple-macosx10.10.0"
 ;
 ; GCDA:       [[FILE_LOOP_HEADER]]:
 ; GCDA-NEXT:    %[[IV:.*]] = phi i32 [ 0, %entry ], [ %[[NEXT_IV:.*]], %[[FILE_LOOP_LATCH:.*]] ]
-; GCDA-NEXT:    %[[FILE_INFO:.*]] = getelementptr inbounds {{.*}}, {{.*}}* @__llvm_internal_gcov_emit_file_info, i32 0, i32 %[[IV]]
-; GCDA-NEXT:    %[[START_FILE_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 0
-; GCDA-NEXT:    %[[START_FILE_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 0
-; GCDA-NEXT:    %[[START_FILE_ARG_0:.*]] = load i8*, i8** %[[START_FILE_ARG_0_PTR]]
-; GCDA-NEXT:    %[[START_FILE_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 1
-; GCDA-NEXT:    %[[START_FILE_ARG_1:.*]] = load i32, i32* %[[START_FILE_ARG_1_PTR]]
-; GCDA-NEXT:    %[[START_FILE_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 2
-; GCDA-NEXT:    %[[START_FILE_ARG_2:.*]] = load i32, i32* %[[START_FILE_ARG_2_PTR]]
-; GCDA-NEXT:    call void @llvm_gcda_start_file(i8* %[[START_FILE_ARG_0]], i32 %[[START_FILE_ARG_1]], i32 %[[START_FILE_ARG_2]])
-; GCDA-NEXT:    %[[NUM_COUNTERS_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 1
-; GCDA-NEXT:    %[[NUM_COUNTERS:.*]] = load i32, i32* %[[NUM_COUNTERS_PTR]]
-; GCDA-NEXT:    %[[EMIT_FUN_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 2
-; GCDA-NEXT:    %[[EMIT_FUN_ARGS_ARRAY:.*]] = load {{.*}}*, {{.*}}** %[[EMIT_FUN_ARGS_ARRAY_PTR]]
-; GCDA-NEXT:    %[[EMIT_ARCS_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 3
-; GCDA-NEXT:    %[[EMIT_ARCS_ARGS_ARRAY:.*]] = load {{.*}}*, {{.*}}** %[[EMIT_ARCS_ARGS_ARRAY_PTR]]
+; GCDA-NEXT:    %[[FILE_INFO:.*]] = getelementptr inbounds {{.*}}, ptr @__llvm_internal_gcov_emit_file_info, i32 0, i32 %[[IV]]
+; GCDA-NEXT:    %[[START_FILE_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 0
+; GCDA-NEXT:    %[[START_FILE_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 0
+; GCDA-NEXT:    %[[START_FILE_ARG_0:.*]] = load ptr, ptr %[[START_FILE_ARG_0_PTR]]
+; GCDA-NEXT:    %[[START_FILE_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 1
+; GCDA-NEXT:    %[[START_FILE_ARG_1:.*]] = load i32, ptr %[[START_FILE_ARG_1_PTR]]
+; GCDA-NEXT:    %[[START_FILE_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 2
+; GCDA-NEXT:    %[[START_FILE_ARG_2:.*]] = load i32, ptr %[[START_FILE_ARG_2_PTR]]
+; GCDA-NEXT:    call void @llvm_gcda_start_file(ptr %[[START_FILE_ARG_0]], i32 %[[START_FILE_ARG_1]], i32 %[[START_FILE_ARG_2]])
+; GCDA-NEXT:    %[[NUM_COUNTERS_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 1
+; GCDA-NEXT:    %[[NUM_COUNTERS:.*]] = load i32, ptr %[[NUM_COUNTERS_PTR]]
+; GCDA-NEXT:    %[[EMIT_FUN_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 2
+; GCDA-NEXT:    %[[EMIT_FUN_ARGS_ARRAY:.*]] = load ptr, ptr %[[EMIT_FUN_ARGS_ARRAY_PTR]]
+; GCDA-NEXT:    %[[EMIT_ARCS_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 3
+; GCDA-NEXT:    %[[EMIT_ARCS_ARGS_ARRAY:.*]] = load ptr, ptr %[[EMIT_ARCS_ARGS_ARRAY_PTR]]
 ; GCDA-NEXT:    %[[ENTER_COUNTER_LOOP_COND:.*]] = icmp slt i32 0, %[[NUM_COUNTERS]]
 ; GCDA-NEXT:    br i1 %[[ENTER_COUNTER_LOOP_COND]], label %[[COUNTER_LOOP:.*]], label %[[FILE_LOOP_LATCH]]
 ;
 ; GCDA:       [[COUNTER_LOOP]]:
 ; GCDA-NEXT:    %[[JV:.*]] = phi i32 [ 0, %[[FILE_LOOP_HEADER]] ], [ %[[NEXT_JV:.*]], %[[COUNTER_LOOP]] ]
-; GCDA-NEXT:    %[[EMIT_FUN_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS_ARRAY]], i32 %[[JV]]
-; GCDA-NEXT:    %[[EMIT_FUN_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 0
-; GCDA-NEXT:    %[[EMIT_FUN_ARG_0:.*]] = load i32, i32* %[[EMIT_FUN_ARG_0_PTR]]
-; GCDA-NEXT:    %[[EMIT_FUN_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 1
-; GCDA-NEXT:    %[[EMIT_FUN_ARG_1:.*]] = load i32, i32* %[[EMIT_FUN_ARG_1_PTR]]
-; GCDA-NEXT:    %[[EMIT_FUN_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 2
-; GCDA-NEXT:    %[[EMIT_FUN_ARG_2:.*]] = load i32, i32* %[[EMIT_FUN_ARG_2_PTR]]
+; GCDA-NEXT:    %[[EMIT_FUN_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS_ARRAY]], i32 %[[JV]]
+; GCDA-NEXT:    %[[EMIT_FUN_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 0
+; GCDA-NEXT:    %[[EMIT_FUN_ARG_0:.*]] = load i32, ptr %[[EMIT_FUN_ARG_0_PTR]]
+; GCDA-NEXT:    %[[EMIT_FUN_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 1
+; GCDA-NEXT:    %[[EMIT_FUN_ARG_1:.*]] = load i32, ptr %[[EMIT_FUN_ARG_1_PTR]]
+; GCDA-NEXT:    %[[EMIT_FUN_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 2
+; GCDA-NEXT:    %[[EMIT_FUN_ARG_2:.*]] = load i32, ptr %[[EMIT_FUN_ARG_2_PTR]]
 ; GCDA-NEXT:    call void @llvm_gcda_emit_function(i32 %[[EMIT_FUN_ARG_0]],
 ; GCDA-SAME:                                       i32 %[[EMIT_FUN_ARG_1]],
 ; GCDA-SAME:                                       i32 %[[EMIT_FUN_ARG_2]])
-; GCDA-NEXT:    %[[EMIT_ARCS_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS_ARRAY]], i32 %[[JV]]
-; GCDA-NEXT:    %[[EMIT_ARCS_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS]], i32 0, i32 0
-; GCDA-NEXT:    %[[EMIT_ARCS_ARG_0:.*]] = load i32, i32* %[[EMIT_ARCS_ARG_0_PTR]]
-; GCDA-NEXT:    %[[EMIT_ARCS_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS]], i32 0, i32 1
-; GCDA-NEXT:    %[[EMIT_ARCS_ARG_1:.*]] = load i64*, i64** %[[EMIT_ARCS_ARG_1_PTR]]
+; GCDA-NEXT:    %[[EMIT_ARCS_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS_ARRAY]], i32 %[[JV]]
+; GCDA-NEXT:    %[[EMIT_ARCS_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS]], i32 0, i32 0
+; GCDA-NEXT:    %[[EMIT_ARCS_ARG_0:.*]] = load i32, ptr %[[EMIT_ARCS_ARG_0_PTR]]
+; GCDA-NEXT:    %[[EMIT_ARCS_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS]], i32 0, i32 1
+; GCDA-NEXT:    %[[EMIT_ARCS_ARG_1:.*]] = load ptr, ptr %[[EMIT_ARCS_ARG_1_PTR]]
 ; GCDA-NEXT:    call void @llvm_gcda_emit_arcs(i32 %[[EMIT_ARCS_ARG_0]],
-; GCDA-SAME:                                   i64* %[[EMIT_ARCS_ARG_1]])
+; GCDA-SAME:                                   ptr %[[EMIT_ARCS_ARG_1]])
 ; GCDA-NEXT:    %[[NEXT_JV]] = add i32 %[[JV]], 1
 ; GCDA-NEXT:    %[[COUNTER_LOOP_COND:.*]] = icmp slt i32 %[[NEXT_JV]], %[[NUM_COUNTERS]]
 ; GCDA-NEXT:    br i1 %[[COUNTER_LOOP_COND]], label %[[COUNTER_LOOP]], label %[[FILE_LOOP_LATCH]]

diff  --git a/llvm/test/Transforms/GCOVProfiling/noprofile.ll b/llvm/test/Transforms/GCOVProfiling/noprofile.ll
index 0a476a759f9bf..b1ce27e614a6d 100644
--- a/llvm/test/Transforms/GCOVProfiling/noprofile.ll
+++ b/llvm/test/Transforms/GCOVProfiling/noprofile.ll
@@ -12,9 +12,9 @@ define dso_local i32 @no_instr(i32 %a) noprofile !dbg !9 {
 
 define dso_local i32 @instr(i32 %a) !dbg !28 {
 ; CHECK-LABEL: @instr(
-; CHECK-NEXT:    [[GCOV_CTR:%.*]] = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), align 4, !dbg [[DBG8:![0-9]+]]
+; CHECK-NEXT:    [[GCOV_CTR:%.*]] = load i64, ptr @__llvm_gcov_ctr, align 4, !dbg [[DBG8:![0-9]+]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[GCOV_CTR]], 1, !dbg [[DBG8]]
-; CHECK-NEXT:    store i64 [[TMP1]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), align 4, !dbg [[DBG8]]
+; CHECK-NEXT:    store i64 [[TMP1]], ptr @__llvm_gcov_ctr, align 4, !dbg [[DBG8]]
 ; CHECK-NEXT:    ret i32 42, !dbg [[DBG8]]
 ;
   ret i32 42, !dbg !44

diff  --git a/llvm/test/Transforms/GCOVProfiling/reset.ll b/llvm/test/Transforms/GCOVProfiling/reset.ll
index b8869ea135d06..8bd4a80455463 100644
--- a/llvm/test/Transforms/GCOVProfiling/reset.ll
+++ b/llvm/test/Transforms/GCOVProfiling/reset.ll
@@ -15,8 +15,8 @@ entry:
 
 ; CHECK: define internal void @__llvm_gcov_reset()
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x i64]* @__llvm_gcov_ctr to i8*), i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x i64]* @__llvm_gcov_ctr.1 to i8*), i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr @__llvm_gcov_ctr, i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr @__llvm_gcov_ctr.1, i8 0, i64 8, i1 false)
 
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!3, !4, !5, !6}

diff  --git a/llvm/test/Transforms/HotColdSplit/phi-with-distinct-outlined-values.ll b/llvm/test/Transforms/HotColdSplit/phi-with-distinct-outlined-values.ll
index cb12befcf463c..9a91c4ad12bb6 100644
--- a/llvm/test/Transforms/HotColdSplit/phi-with-distinct-outlined-values.ll
+++ b/llvm/test/Transforms/HotColdSplit/phi-with-distinct-outlined-values.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-macosx10.14.0"
 ; CHECK-LABEL: define {{.*}}@foo.cold.1(
 ; CHECK: call {{.*}}@sink
 ; CHECK: %p.ce = phi i32 [ 1, %coldbb ], [ 3, %coldbb2 ]
-; CHECK-NEXT: store i32 %p.ce, i32* %p.ce.out 
+; CHECK-NEXT: store i32 %p.ce, ptr %p.ce.out 
 
 define void @foo(i32 %cond) {
 entry:

diff  --git a/llvm/test/Transforms/HotColdSplit/split-phis-in-exit-blocks.ll b/llvm/test/Transforms/HotColdSplit/split-phis-in-exit-blocks.ll
index 2f5360ccb1e7e..4312aa7cb2da3 100644
--- a/llvm/test/Transforms/HotColdSplit/split-phis-in-exit-blocks.ll
+++ b/llvm/test/Transforms/HotColdSplit/split-phis-in-exit-blocks.ll
@@ -12,10 +12,9 @@ target triple = "x86_64-apple-macosx10.14.0"
 ; CHECK-NEXT:  ]
 ;
 ; CHECK:  codeRepl:
-; CHECK-NEXT:    bitcast
 ; CHECK-NEXT:    lifetime.start
-; CHECK-NEXT:    call void @pluto.cold.1(i1* %tmp8.ce.loc)
-; CHECK-NEXT:    %tmp8.ce.reload = load i1, i1* %tmp8.ce.loc
+; CHECK-NEXT:    call void @pluto.cold.1(ptr %tmp8.ce.loc)
+; CHECK-NEXT:    %tmp8.ce.reload = load i1, ptr %tmp8.ce.loc
 ; CHECK-NEXT:    lifetime.end
 ; CHECK-NEXT:    br label %bb7
 ;

diff  --git a/llvm/test/Transforms/IROutliner/
diff erent-order-phi-merges.ll b/llvm/test/Transforms/IROutliner/
diff erent-order-phi-merges.ll
index 7539f836026f8..0e4ea09cee727 100644
--- a/llvm/test/Transforms/IROutliner/
diff erent-order-phi-merges.ll
+++ b/llvm/test/Transforms/IROutliner/
diff erent-order-phi-merges.ll
@@ -46,11 +46,10 @@ bb5:
 ; CHECK-LABEL: @f1(
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 0)
-; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
+; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
 ; CHECK-NEXT:    br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
 ; CHECK:       bb1_after_outline:
 ; CHECK-NEXT:    ret void
@@ -62,11 +61,10 @@ bb5:
 ; CHECK-LABEL: @f2(
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 1)
-; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 1)
+; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
 ; CHECK-NEXT:    br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
 ; CHECK:       bb1_after_outline:
 ; CHECK-NEXT:    ret void
@@ -103,10 +101,10 @@ bb5:
 ; CHECK-NEXT:    switch i32 [[TMP1]], label [[FINAL_BLOCK_0:%.*]] [
 ; CHECK-NEXT:    ]
 ; CHECK:       output_block_0_1:
-; CHECK-NEXT:    store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_1]]
 ; CHECK:       output_block_1_1:
-; CHECK-NEXT:    store i32 [[TMP7]], i32* [[TMP0]], align 4
+; CHECK-NEXT:    store i32 [[TMP7]], ptr [[TMP0]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_1]]
 ; CHECK:       final_block_0:
 ; CHECK-NEXT:    ret i1 false

diff  --git a/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll b/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
index 76bef612d77f4..e4b45e0f86187 100644
--- a/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
+++ b/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
@@ -49,15 +49,13 @@ bb5:
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    [[LT_CAST1:%.*]] = bitcast i32* [[PHINODE1_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT:    [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32* [[PHINODE1_CE_LOC]])
-; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT:    [[PHINODE1_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT:    [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT:    [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
 ; CHECK-NEXT:    br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
 ; CHECK:       bb1_after_outline:
 ; CHECK-NEXT:    ret void
@@ -71,15 +69,13 @@ bb5:
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    [[LT_CAST1:%.*]] = bitcast i32* [[PHINODE1_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
-; CHECK-NEXT:    [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32* [[PHINODE1_CE_LOC]])
-; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT:    [[PHINODE1_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT:    [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT:    [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
 ; CHECK-NEXT:    br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
 ; CHECK:       bb1_after_outline:
 ; CHECK-NEXT:    ret void
@@ -109,8 +105,8 @@ bb5:
 ; CHECK-NEXT:    [[PHINODE1_CE:%.*]] = phi i32 [ 5, [[BB1_TO_OUTLINE]] ], [ 5, [[BB2]] ]
 ; CHECK-NEXT:    br label [[BB5_EXITSTUB:%.*]]
 ; CHECK:       bb5.exitStub:
-; CHECK-NEXT:    store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT:    store i32 [[PHINODE1_CE]], i32* [[TMP1:%.*]], align 4
+; CHECK-NEXT:    store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[PHINODE1_CE]], ptr [[TMP1:%.*]], align 4
 ; CHECK-NEXT:    ret i1 true
 ; CHECK:       bb1_after_outline.exitStub:
 ; CHECK-NEXT:    ret i1 false

diff  --git a/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll b/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
index 54da00fe089e3..4e6fb738211e0 100644
--- a/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
+++ b/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
@@ -42,21 +42,19 @@ bb5:
 ; CHECK-NEXT:  bb1:
 ; CHECK-NEXT:    [[PHINODE_CE_LOC1:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 0)
-; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT:    call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
+; CHECK-NEXT:    [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       placeholder:
 ; CHECK-NEXT:    [[A:%.*]] = sub i32 5, 4
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[LT_CAST3:%.*]] = bitcast i32* [[PHINODE_CE_LOC1]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST3]])
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32* [[PHINODE_CE_LOC1]], i32 1)
-; CHECK-NEXT:    [[PHINODE_CE_RELOAD2:%.*]] = load i32, i32* [[PHINODE_CE_LOC1]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST3]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
+; CHECK-NEXT:    call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC1]], i32 1)
+; CHECK-NEXT:    [[PHINODE_CE_RELOAD2:%.*]] = load i32, ptr [[PHINODE_CE_LOC1]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       placeholder1:
 ; CHECK-NEXT:    [[B:%.*]] = add i32 5, 4
@@ -88,10 +86,10 @@ bb5:
 ; CHECK-NEXT:    i32 1, label [[OUTPUT_BLOCK_1_0:%.*]]
 ; CHECK-NEXT:    ]
 ; CHECK:       output_block_0_0:
-; CHECK-NEXT:    store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_0]]
 ; CHECK:       output_block_1_0:
-; CHECK-NEXT:    store i32 [[TMP7]], i32* [[TMP0]], align 4
+; CHECK-NEXT:    store i32 [[TMP7]], ptr [[TMP0]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_0]]
 ; CHECK:       final_block_0:
 ; CHECK-NEXT:    ret void

diff  --git a/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll b/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
index e2e995c0c8794..fc871cab50419 100644
--- a/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
+++ b/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
@@ -57,7 +57,7 @@ bb5:
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    [[A:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 [[A]], i32* null, i32 -1)
+; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 [[A]], ptr null, i32 -1)
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
 ; CHECK-NEXT:    [[E:%.*]] = sub i32 [[TMP0]], [[TMP1]]
@@ -72,11 +72,10 @@ bb5:
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[F_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 1, i32* [[F_CE_LOC]], i32 0)
-; CHECK-NEXT:    [[F_CE_RELOAD:%.*]] = load i32, i32* [[F_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[F_CE_LOC]], i32 0)
+; CHECK-NEXT:    [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
 ; CHECK-NEXT:    [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
@@ -101,7 +100,7 @@ bb5:
 ; CHECK-NEXT:    i32 0, label [[OUTPUT_BLOCK_1_0:%.*]]
 ; CHECK-NEXT:    ]
 ; CHECK:       output_block_1_0:
-; CHECK-NEXT:    store i32 [[TMP5:%.*]], i32* [[TMP3:%.*]], align 4
+; CHECK-NEXT:    store i32 [[TMP5:%.*]], ptr [[TMP3:%.*]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_0]]
 ; CHECK:       phi_block:
 ; CHECK-NEXT:    [[TMP5]] = phi i32 [ [[TMP2]], [[BB2_TO_OUTLINE]] ], [ [[TMP2]], [[BB3]] ]

diff  --git a/llvm/test/Transforms/IROutliner/no-external-block-entries.ll b/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
index 947bda2887174..4f868818d6bfc 100644
--- a/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
+++ b/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
@@ -35,15 +35,14 @@ block_6:
 ; CHECK-LABEL: @fn1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[B_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32* [[B_CE_LOC]], i32 0)
-; CHECK-NEXT:    [[B_CE_RELOAD:%.*]] = load i32, i32* [[B_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT:    call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
+; CHECK-NEXT:    [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
 ; CHECK-NEXT:    br label [[BLOCK_3:%.*]]
 ; CHECK:       block_3:
 ; CHECK-NEXT:    [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32* null, i32 -1)
+; CHECK-NEXT:    call void @outlined_ir_func_0(ptr null, i32 -1)
 ; CHECK-NEXT:    br label [[BLOCK_6:%.*]]
 ; CHECK:       block_6:
 ; CHECK-NEXT:    unreachable
@@ -69,7 +68,7 @@ block_6:
 ; CHECK-NEXT:    i32 0, label [[OUTPUT_BLOCK_0_0:%.*]]
 ; CHECK-NEXT:    ]
 ; CHECK:       output_block_0_0:
-; CHECK-NEXT:    store i32 [[B_CE]], i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[B_CE]], ptr [[TMP0:%.*]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_0]]
 ; CHECK:       final_block_0:
 ; CHECK-NEXT:    ret void

diff  --git a/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll b/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
index 53adf7c2a35c6..3fc38ae1006f7 100644
--- a/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
+++ b/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
@@ -33,15 +33,14 @@ block_6:
 ; CHECK-LABEL: @fn1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[B_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32* [[B_CE_LOC]], i32 0)
-; CHECK-NEXT:    [[B_CE_RELOAD:%.*]] = load i32, i32* [[B_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT:    call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
+; CHECK-NEXT:    [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
 ; CHECK-NEXT:    br label [[BLOCK_3:%.*]]
 ; CHECK:       block_3:
 ; CHECK-NEXT:    [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32* null, i32 -1)
+; CHECK-NEXT:    call void @outlined_ir_func_0(ptr null, i32 -1)
 ; CHECK-NEXT:    br label [[BLOCK_6:%.*]]
 ; CHECK:       block_6:
 ; CHECK-NEXT:    unreachable
@@ -65,7 +64,7 @@ block_6:
 ; CHECK-NEXT:    i32 0, label [[OUTPUT_BLOCK_0_0:%.*]]
 ; CHECK-NEXT:    ]
 ; CHECK:       output_block_0_0:
-; CHECK-NEXT:    store i32 [[B_CE]], i32* [[TMP0:%.*]], align 4
+; CHECK-NEXT:    store i32 [[B_CE]], ptr [[TMP0:%.*]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_0]]
 ; CHECK:       final_block_0:
 ; CHECK-NEXT:    ret void

diff  --git a/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll b/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
index 54bfab8de2597..5b134dd6161c4 100644
--- a/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
+++ b/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
@@ -58,11 +58,10 @@ bb5:
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    [[A:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[F_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32* [[F_CE_LOC]], i32 0)
-; CHECK-NEXT:    [[F_CE_RELOAD:%.*]] = load i32, i32* [[F_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 0)
+; CHECK-NEXT:    [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
 ; CHECK-NEXT:    [[E:%.*]] = sub i32 [[TMP0]], [[TMP1]]
@@ -78,11 +77,10 @@ bb5:
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[LT_CAST:%.*]] = bitcast i32* [[F_CE_LOC]] to i8*
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
-; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32* [[F_CE_LOC]], i32 1)
-; CHECK-NEXT:    [[F_CE_RELOAD:%.*]] = load i32, i32* [[F_CE_LOC]], align 4
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 1)
+; CHECK-NEXT:    [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
 ; CHECK-NEXT:    br label [[BB5]]
 ; CHECK:       bb4:
 ; CHECK-NEXT:    [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
@@ -112,10 +110,10 @@ bb5:
 ; CHECK-NEXT:    i32 1, label [[OUTPUT_BLOCK_1_0:%.*]]
 ; CHECK-NEXT:    ]
 ; CHECK:       output_block_0_0:
-; CHECK-NEXT:    store i32 [[F_CE]], i32* [[TMP2:%.*]], align 4
+; CHECK-NEXT:    store i32 [[F_CE]], ptr [[TMP2:%.*]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_0]]
 ; CHECK:       output_block_1_0:
-; CHECK-NEXT:    store i32 [[TMP4]], i32* [[TMP2]], align 4
+; CHECK-NEXT:    store i32 [[TMP4]], ptr [[TMP2]], align 4
 ; CHECK-NEXT:    br label [[FINAL_BLOCK_0]]
 ; CHECK:       final_block_0:
 ; CHECK-NEXT:    ret void

diff  --git a/llvm/test/Transforms/LowerTypeTests/export-alias.ll b/llvm/test/Transforms/LowerTypeTests/export-alias.ll
index 2ad8835475b10..b7b57826af95a 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-alias.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-alias.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -S %s -lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/exported-funcs.yaml | FileCheck %s
 ;
-; CHECK: @alias1 = weak alias void (), void ()* @external_addrtaken
-; CHECK: @alias2 = hidden alias void (), void ()* @external_addrtaken
+; CHECK: @alias1 = weak alias void (), ptr @external_addrtaken
+; CHECK: @alias2 = hidden alias void (), ptr @external_addrtaken
 ; CHECK-NOT: @alias3 = alias
 ; CHECK-NOT: @not_present
 

diff  --git a/llvm/test/Transforms/LowerTypeTests/export-allones.ll b/llvm/test/Transforms/LowerTypeTests/export-allones.ll
index 00af10449dfd9..cb4f278825964 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-allones.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-allones.ll
@@ -141,17 +141,17 @@
 
 ; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
 
-; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
-; X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
-; X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 1 to i8*)
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[G]]
+; X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to ptr)
+; X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 1 to ptr)
 
-; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
-; X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
-; X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 128 to i8*)
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr (i8, ptr [[G]], i64 4)
+; X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to ptr)
+; X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 128 to ptr)
 
 ; ARM-NOT: alias {{.*}} inttoptr
 
-; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
+; CHECK: @foo = alias [2048 x i8], ptr [[G]]
 
 ; SUMMARY:      TypeIdMap:
 ; SUMMARY-NEXT:   typeid1:

diff  --git a/llvm/test/Transforms/LowerTypeTests/export-bytearray.ll b/llvm/test/Transforms/LowerTypeTests/export-bytearray.ll
index aa06a8d56612f..f4666e1ab083b 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-bytearray.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-bytearray.ll
@@ -14,23 +14,23 @@
 ; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
 ; CHECK: [[B:@[0-9]+]] = private constant [258 x i8] c"\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01"
 
-; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
-; X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
-; X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 65 to i8*)
-; CHECK: @__typeid_typeid1_byte_array = hidden alias i8, i8* @bits.1
-; X86: @__typeid_typeid1_bit_mask = hidden alias i8, inttoptr (i8 2 to i8*)
-
-; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
-; X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
-; X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 257 to i8*)
-; CHECK: @__typeid_typeid2_byte_array = hidden alias i8, i8* @bits
-; X86: @__typeid_typeid2_bit_mask = hidden alias i8, inttoptr (i8 1 to i8*)
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[G]]
+; X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to ptr)
+; X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 65 to ptr)
+; CHECK: @__typeid_typeid1_byte_array = hidden alias i8, ptr @bits.1
+; X86: @__typeid_typeid1_bit_mask = hidden alias i8, inttoptr (i8 2 to ptr)
+
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr (i8, ptr [[G]], i64 4)
+; X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to ptr)
+; X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 257 to ptr)
+; CHECK: @__typeid_typeid2_byte_array = hidden alias i8, ptr @bits
+; X86: @__typeid_typeid2_bit_mask = hidden alias i8, inttoptr (i8 1 to ptr)
 
 ; ARM-NOT: alias {{.*}} inttoptr
 
-; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
-; CHECK: @bits = private alias i8, getelementptr inbounds ([258 x i8], [258 x i8]* [[B]], i64 0, i64 0)
-; CHECK: @bits.1 = private alias i8, getelementptr inbounds ([258 x i8], [258 x i8]* [[B]], i64 0, i64 0)
+; CHECK: @foo = alias [2048 x i8], ptr [[G]]
+; CHECK: @bits = private alias i8, ptr [[B]]
+; CHECK: @bits.1 = private alias i8, ptr [[B]]
 
 ; SUMMARY:      TypeIdMap:
 ; SUMMARY-NEXT:   typeid1:

diff  --git a/llvm/test/Transforms/LowerTypeTests/export-icall.ll b/llvm/test/Transforms/LowerTypeTests/export-icall.ll
index 54c2f568b609f..bd889d2715e48 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-icall.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-icall.ll
@@ -36,19 +36,19 @@ define void @f3(i32 %x) !type !8 {
 !8 = !{i64 0, !"typeid3"}
 
 
-; CHECK-DAG: @__typeid_typeid1_global_addr = hidden alias i8, bitcast (void ()* [[JT1:.*]] to i8*)
-; CHECK-DAG: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 3 to i8*)
-; CHECK-DAG: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 4 to i8*)
+; CHECK-DAG: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[JT1:.*]]
+; CHECK-DAG: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 3 to ptr)
+; CHECK-DAG: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 4 to ptr)
 
-; CHECK-DAG: @h                    = alias void (i8), bitcast (void ()* [[JT1]] to void (i8)*)
-; CHECK-DAG: @f                    = alias void (i32), {{.*}}getelementptr {{.*}}void ()* [[JT1]]
-; CHECK-DAG: @f2                   = alias void (i32), {{.*}}getelementptr {{.*}}void ()* [[JT1]]
-; CHECK-DAG: @external.cfi_jt      = hidden alias void (), {{.*}}getelementptr {{.*}}void ()* [[JT1]]
-; CHECK-DAG: @external_weak.cfi_jt = hidden alias void (), {{.*}}getelementptr {{.*}}void ()* [[JT1]]
+; CHECK-DAG: @h                    = alias void (i8), ptr [[JT1]]
+; CHECK-DAG: @f                    = alias void (i32), {{.*}}getelementptr {{.*}}ptr [[JT1]]
+; CHECK-DAG: @f2                   = alias void (i32), {{.*}}getelementptr {{.*}}ptr [[JT1]]
+; CHECK-DAG: @external.cfi_jt      = hidden alias void (), {{.*}}getelementptr {{.*}}ptr [[JT1]]
+; CHECK-DAG: @external_weak.cfi_jt = hidden alias void (), {{.*}}getelementptr {{.*}}ptr [[JT1]]
 
-; CHECK-DAG: @__typeid_typeid2_global_addr = hidden alias i8, bitcast (void ()* [[JT2:.*]] to i8*)
+; CHECK-DAG: @__typeid_typeid2_global_addr = hidden alias i8, ptr [[JT2:.*]]
 
-; CHECK-DAG: @g                    = alias void (), void ()* [[JT2]]
+; CHECK-DAG: @g                    = alias void (), ptr [[JT2]]
 
 ; CHECK-DAG: define hidden void @h.cfi(i8 {{.*}}) !type !{{.*}}
 ; CHECK-DAG: declare !type !{{.*}} void @external()

diff  --git a/llvm/test/Transforms/LowerTypeTests/export-inline.ll b/llvm/test/Transforms/LowerTypeTests/export-inline.ll
index b481a8bce96da..61b739855b52f 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-inline.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-inline.ll
@@ -13,17 +13,17 @@
 
 ; CHECK: [[G:@[0-9]+]] = private constant { [2048 x i8] } zeroinitializer
 
-; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i32 0)
-; CHECK-X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to i8*)
-; CHECK-X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 3 to i8*)
-; CHECK-X86: @__typeid_typeid1_inline_bits = hidden alias i8, inttoptr (i32 9 to i8*)
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[G]]
+; CHECK-X86: @__typeid_typeid1_align = hidden alias i8, inttoptr (i8 1 to ptr)
+; CHECK-X86: @__typeid_typeid1_size_m1 = hidden alias i8, inttoptr (i64 3 to ptr)
+; CHECK-X86: @__typeid_typeid1_inline_bits = hidden alias i8, inttoptr (i32 9 to ptr)
 
-; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0, i64 4)
-; CHECK-X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to i8*)
-; CHECK-X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 33 to i8*)
-; CHECK-X86: @__typeid_typeid2_inline_bits = hidden alias i8, inttoptr (i64 8589934593 to i8*)
+; CHECK: @__typeid_typeid2_global_addr = hidden alias i8, getelementptr (i8, ptr [[G]], i64 4)
+; CHECK-X86: @__typeid_typeid2_align = hidden alias i8, inttoptr (i8 2 to ptr)
+; CHECK-X86: @__typeid_typeid2_size_m1 = hidden alias i8, inttoptr (i64 33 to ptr)
+; CHECK-X86: @__typeid_typeid2_inline_bits = hidden alias i8, inttoptr (i64 8589934593 to ptr)
 
-; CHECK: @foo = alias [2048 x i8], getelementptr inbounds ({ [2048 x i8] }, { [2048 x i8] }* [[G]], i32 0, i32 0)
+; CHECK: @foo = alias [2048 x i8], ptr [[G]]
 
 ; SUMMARY:      TypeIdMap:
 ; SUMMARY-NEXT:   typeid1:

diff  --git a/llvm/test/Transforms/LowerTypeTests/export-single.ll b/llvm/test/Transforms/LowerTypeTests/export-single.ll
index 92e810c097764..d9876edfae2d7 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-single.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-single.ll
@@ -7,8 +7,8 @@
 
 ; CHECK: [[G:@[0-9]+]] = private constant { i32 } { i32 42 }
 
-; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, bitcast ({ i32 }* [[G]] to i8*)
-; CHECK: @foo = alias i32, getelementptr inbounds ({ i32 }, { i32 }* [[G]], i32 0, i32 0)
+; CHECK: @__typeid_typeid1_global_addr = hidden alias i8, ptr [[G]]
+; CHECK: @foo = alias i32, ptr [[G]]
 
 ; SUMMARY:      TypeIdMap:
 ; SUMMARY-NEXT:   typeid1:

diff  --git a/llvm/test/Transforms/LowerTypeTests/pr37625.ll b/llvm/test/Transforms/LowerTypeTests/pr37625.ll
index 04952ed2b6d40..a88b2ac139e02 100644
--- a/llvm/test/Transforms/LowerTypeTests/pr37625.ll
+++ b/llvm/test/Transforms/LowerTypeTests/pr37625.ll
@@ -11,4 +11,4 @@ declare !type !2 extern_weak void @external_addrtaken(i8)
 !1 = !{!"external_addrtaken", i8 0, !2}
 !2 = !{i64 0, !"typeid1"}
 
-; CHECK-DAG: @external_addrtaken = alias void (i8), bitcast
+; CHECK-DAG: @external_addrtaken = alias void (i8), ptr @.cfi.jumptable

diff  --git a/llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll b/llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll
index da95b73b80693..ac07148c75521 100644
--- a/llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll
+++ b/llvm/test/Transforms/NewGVN/phi-of-ops-simplified-to-existing-value-then-changes-again.ll
@@ -83,7 +83,7 @@ define void @pr42422(i1 %c.1, i1 %c.2) {
 ; CHECK:       bb14:
 ; CHECK-NEXT:    br label [[BB16]]
 ; CHECK:       bb15:
-; CHECK-NEXT:    store i8 poison, i8* null, align 1
+; CHECK-NEXT:    store i8 poison, ptr null, align 1
 ; CHECK-NEXT:    br label [[BB16]]
 ; CHECK:       bb16:
 ; CHECK-NEXT:    [[TMP17:%.*]] = phi i32 [ poison, [[BB15]] ], [ 1, [[BB14]] ], [ 9, [[BB7]] ]

diff  --git a/llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll b/llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll
index 6af941a3a2264..cbdf209d1ce50 100644
--- a/llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll
+++ b/llvm/test/Transforms/NewGVN/pr42422-phi-of-ops.ll
@@ -35,7 +35,7 @@ define void @d() {
 ; CHECK:       if.then11:
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       if.end12:
-; CHECK-NEXT:    store i8 poison, i8* null, align 1
+; CHECK-NEXT:    store i8 poison, ptr null, align 1
 ; CHECK-NEXT:    br label [[CLEANUP]]
 ; CHECK:       cleanup:
 ; CHECK-NEXT:    [[CLEANUP_DEST:%.*]] = phi i32 [ poison, [[IF_END12]] ], [ 1, [[IF_THEN11]] ], [ 9, [[IF_THEN]] ]

diff  --git a/llvm/test/Transforms/PGOProfile/branch1.ll b/llvm/test/Transforms/PGOProfile/branch1.ll
index 1edb3c55d976d..7ec23d08ea5eb 100644
--- a/llvm/test/Transforms/PGOProfile/branch1.ll
+++ b/llvm/test/Transforms/PGOProfile/branch1.ll
@@ -23,7 +23,7 @@ define i32 @test_br_1(i32 %i) {
 ; USE-SAME: !prof ![[FUNC_ENTRY_COUNT:[0-9]+]]
 entry:
 ; GEN: entry:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_1, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_1, i64 {{[0-9]+}}, i32 2, i32 0)
   %cmp = icmp sgt i32 %i, 0
   br i1 %cmp, label %if.then, label %if.end
 ; USE: br i1 %cmp, label %if.then, label %if.end
@@ -35,7 +35,7 @@ entry:
 
 if.then:
 ; GEN: if.then:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_1, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_1, i64 {{[0-9]+}}, i32 2, i32 1)
   %add = add nsw i32 %i, 2
   br label %if.end
 

diff  --git a/llvm/test/Transforms/PGOProfile/branch2.ll b/llvm/test/Transforms/PGOProfile/branch2.ll
index 06d6623ab12e3..b65020d1557bf 100644
--- a/llvm/test/Transforms/PGOProfile/branch2.ll
+++ b/llvm/test/Transforms/PGOProfile/branch2.ll
@@ -16,7 +16,7 @@ define i32 @test_br_2(i32 %i) {
 entry:
 ; GEN: entry:
 ; NOTENTRY-NOT: llvm.instrprof.increment
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 0)
   %cmp = icmp sgt i32 %i, 0
   br i1 %cmp, label %if.then, label %if.else
 ; USE: br i1 %cmp, label %if.then, label %if.else
@@ -25,14 +25,14 @@ entry:
 
 if.then:
 ; GEN: if.then:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 0)
 ; ENTRY-NOT: llvm.instrprof.increment
   %add = add nsw i32 %i, 2
   br label %if.end
 
 if.else:
 ; GEN: if.else:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 1)
   %sub = sub nsw i32 %i, 2
   br label %if.end
 

diff  --git a/llvm/test/Transforms/PGOProfile/counter_promo.ll b/llvm/test/Transforms/PGOProfile/counter_promo.ll
index 0dcac8431ecd8..f4c4d2a8123a3 100644
--- a/llvm/test/Transforms/PGOProfile/counter_promo.ll
+++ b/llvm/test/Transforms/PGOProfile/counter_promo.ll
@@ -46,16 +46,16 @@ bb9:                                              ; preds = %bb8, %bb7, %bb4
 
 bb12:                                             ; preds = %bb9
   ret void
-; NONATOMIC_PROMO: %[[PROMO1:[a-z0-9.]+]] = load {{.*}} @__profc_foo{{.*}} 0)
+; NONATOMIC_PROMO: %[[PROMO1:[a-z0-9.]+]] = load {{.*}} @__profc_foo{{.*}}
 ; NONATOMIC_PROMO-NEXT: add {{.*}} %[[PROMO1]], %[[LIVEOUT1]] 
-; NONATOMIC_PROMO-NEXT: store {{.*}}@__profc_foo{{.*}}0)
+; NONATOMIC_PROMO-NEXT: store {{.*}}@__profc_foo{{.*}}
 ; NONATOMIC_PROMO-NEXT: %[[PROMO2:[a-z0-9.]+]] = load {{.*}} @__profc_foo{{.*}} 1)
 ; NONATOMIC_PROMO-NEXT: add {{.*}} %[[PROMO2]], %[[LIVEOUT2]]
 ; NONATOMIC_PROMO-NEXT: store {{.*}}@__profc_foo{{.*}}1)
 ; NONATOMIC_PROMO-NEXT: %[[PROMO3:[a-z0-9.]+]] = load {{.*}} @__profc_foo{{.*}} 2)
 ; NONATOMIC_PROMO-NEXT: add {{.*}} %[[PROMO3]], %[[LIVEOUT3]]
 ; NONATOMIC_PROMO-NEXT: store {{.*}}@__profc_foo{{.*}}2)
-; ATOMIC_PROMO: atomicrmw add {{.*}} @__profc_foo{{.*}}0), i64 %[[LIVEOUT1]] seq_cst
+; ATOMIC_PROMO: atomicrmw add {{.*}} @__profc_foo{{.*}}, i64 %[[LIVEOUT1]] seq_cst
 ; ATOMIC_PROMO-NEXT: atomicrmw add {{.*}} @__profc_foo{{.*}}1), i64 %[[LIVEOUT2]] seq_cst
 ; ATOMIC_PROMO-NEXT: atomicrmw add {{.*}} @__profc_foo{{.*}}2), i64 %[[LIVEOUT3]] seq_cst
 ; PROMO-NOT: @__profc_foo{{.*}})

diff  --git a/llvm/test/Transforms/PGOProfile/criticaledge.ll b/llvm/test/Transforms/PGOProfile/criticaledge.ll
index 55f691788fd45..4264b03c23d12 100644
--- a/llvm/test/Transforms/PGOProfile/criticaledge.ll
+++ b/llvm/test/Transforms/PGOProfile/criticaledge.ll
@@ -17,7 +17,7 @@ define i32 @test_criticalEdge(i32 %i, i32 %j) {
 entry:
 ; CHECK: entry:
 ; NOTENTRY-NOT: call void @llvm.instrprof.increment
-; ENTRY:   call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 0)
+; ENTRY:   call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 0)
   switch i32 %i, label %sw.default [
     i32 1, label %sw.bb
     i32 2, label %sw.bb1
@@ -31,24 +31,24 @@ entry:
 ; USE-SAME: !prof ![[BW_SWITCH:[0-9]+]]
 
 ; CHECK: entry.sw.bb2_crit_edge1:
-; NOTENTRY:   call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 1)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 2)
+; NOTENTRY:   call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 1)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 2)
 ; CHECK:   br label %sw.bb2
 
 ; CHECK: entry.sw.bb2_crit_edge:
-; NOTENTRY:   call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 0)
-; TENTRY:   call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 1)
+; NOTENTRY:   call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 0)
+; TENTRY:   call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 1)
 ; CHECK:   br label %sw.bb2
 
 sw.bb:
 ; GEN: sw.bb:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 5)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 5)
   %call = call i32 @bar(i32 2)
   br label %sw.epilog
 
 sw.bb1:
 ; GEN: sw.bb1:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 4)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 4)
   %call2 = call i32 @bar(i32 1024)
   br label %sw.epilog
 
@@ -62,14 +62,14 @@ sw.bb2:
 
 if.then:
 ; GEN: if.then:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 2)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 2)
 ; ENTRY-NOT: call void @llvm.instrprof.increment
   %call4 = call i32 @bar(i32 4)
   br label %return
 
 if.end:
 ; GEN: if.end:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 3)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 3)
   %call5 = call i32 @bar(i32 8)
   br label %sw.epilog
 
@@ -84,13 +84,13 @@ sw.default:
 
 if.then8:
 ; GEN: if.then8:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 7)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 7)
   %add = add nsw i32 %call6, 10
   br label %if.end9
 
 if.end9:
 ; GEN: if.end9:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @__profn_test_criticalEdge, i32 0, i32 0), i64 {{[0-9]+}}, i32 8, i32 6)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_criticalEdge, i64 {{[0-9]+}}, i32 8, i32 6)
   %res.0 = phi i32 [ %add, %if.then8 ], [ %call6, %sw.default ]
   br label %sw.epilog
 
@@ -109,7 +109,7 @@ return:
 
 define internal i32 @bar(i32 %i) {
 entry:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn__stdin__bar, i32 0, i32 0), i64 {{[0-9]+}}, i32 1, i32 0)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn__stdin__bar, i64 {{[0-9]+}}, i32 1, i32 0)
   ret i32 %i
 }
 

diff  --git a/llvm/test/Transforms/PGOProfile/instr_entry_bb.ll b/llvm/test/Transforms/PGOProfile/instr_entry_bb.ll
index 766faa5fb682a..e1380a0edf0e1 100644
--- a/llvm/test/Transforms/PGOProfile/instr_entry_bb.ll
+++ b/llvm/test/Transforms/PGOProfile/instr_entry_bb.ll
@@ -13,9 +13,9 @@ target triple = "x86_64-unknown-linux-gnu"
 define i32 @test_br_2(i32 %i) {
 entry:
 ; GEN: entry:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 0)
 ; GENA: entry:
-; GENA: %{{[0-9+]}} = atomicrmw add i64* getelementptr inbounds ([2 x i64], [2 x i64]* @__profc_test_br_2, i32 0, i32 0), i64 1 monotonic
+; GENA: %{{[0-9+]}} = atomicrmw add ptr @__profc_test_br_2, i64 1 monotonic
 ; USE: br i1 %cmp, label %if.then, label %if.else
 ; USE-SAME: !prof ![[BW_ENTRY:[0-9]+]]
 ; USE: ![[BW_ENTRY]] = !{!"branch_weights", i32 0, i32 1}
@@ -30,11 +30,11 @@ if.then:
 
 if.else:
 ; GEN: if.else:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_test_br_2, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_br_2, i64 {{[0-9]+}}, i32 2, i32 1)
 ; GENA: if.else:
-; GENA:  %pgocount = load i64, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @__profc_test_br_2, i32 0, i32 1), align 8
+; GENA:  %pgocount = load i64, ptr getelementptr inbounds ([2 x i64], ptr @__profc_test_br_2, i32 0, i32 1), align 8
 ; GENA:  [[V:%[0-9]*]] = add i64 %pgocount, 1
-; GENA:  store i64 [[V]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @__profc_test_br_2, i32 0, i32 1), align 8
+; GENA:  store i64 [[V]], ptr getelementptr inbounds ([2 x i64], ptr @__profc_test_br_2, i32 0, i32 1), align 8
   %sub = sub nsw i32 %i, 2
   br label %if.end
 

diff  --git a/llvm/test/Transforms/PGOProfile/loop1.ll b/llvm/test/Transforms/PGOProfile/loop1.ll
index bb4f42843ab32..86029663bf8e1 100644
--- a/llvm/test/Transforms/PGOProfile/loop1.ll
+++ b/llvm/test/Transforms/PGOProfile/loop1.ll
@@ -15,8 +15,8 @@ target triple = "x86_64-unknown-linux-gnu"
 define i32 @test_simple_for(i32 %n) {
 entry:
 ; GEN: entry:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_simple_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_simple_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_simple_for, i64 {{[0-9]+}}, i32 2, i32 1)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_simple_for, i64 {{[0-9]+}}, i32 2, i32 0)
   br label %for.cond
 
 for.cond:
@@ -38,8 +38,8 @@ for.body:
 
 for.inc:
 ; GEN: for.inc:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_simple_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 0)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_simple_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 2, i32 1)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_simple_for, i64 {{[0-9]+}}, i32 2, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_simple_for, i64 {{[0-9]+}}, i32 2, i32 1)
   %inc1 = add nsw i32 %i, 1
   br label %for.cond
 

diff  --git a/llvm/test/Transforms/PGOProfile/loop2.ll b/llvm/test/Transforms/PGOProfile/loop2.ll
index 9e043afdfeff6..13725ec097546 100644
--- a/llvm/test/Transforms/PGOProfile/loop2.ll
+++ b/llvm/test/Transforms/PGOProfile/loop2.ll
@@ -14,8 +14,8 @@ target triple = "x86_64-unknown-linux-gnu"
 define i32 @test_nested_for(i32 %r, i32 %s) {
 entry:
 ; GEN: entry:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 2)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 0)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 2)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 0)
   br label %for.cond.outer
 
 for.cond.outer:
@@ -51,8 +51,8 @@ for.body.inner:
 
 for.inc.inner:
 ; GEN: for.inc.inner:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 0)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 1)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 1)
   %inc.1 = add nsw i32 %j.0, 1
   br label %for.cond.inner
 
@@ -62,8 +62,8 @@ for.end.inner:
 
 for.inc.outer:
 ; GEN: for.inc.outer:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 1)
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @__profn_test_nested_for, i32 0, i32 0), i64 {{[0-9]+}}, i32 3, i32 2)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 1)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_nested_for, i64 {{[0-9]+}}, i32 3, i32 2)
   %inc.2 = add nsw i32 %i.0, 1
   br label %for.cond.outer
 

diff  --git a/llvm/test/Transforms/PGOProfile/preinline.ll b/llvm/test/Transforms/PGOProfile/preinline.ll
index 9a42e97d196e1..9afc89d2e4430 100644
--- a/llvm/test/Transforms/PGOProfile/preinline.ll
+++ b/llvm/test/Transforms/PGOProfile/preinline.ll
@@ -5,8 +5,8 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define i32 @foo(i32 %i) {
 entry:
-; GEN: %pgocount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_foo
-; GEN-NOT: %pgocount.i = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc__stdin__bar
+; GEN: %pgocount = load i64, ptr @__profc_foo
+; GEN-NOT: %pgocount.i = load i64, ptr @__profc__stdin__bar
   %call = call i32 @bar()
   %add = add nsw i32 %i, %call
   ret i32 %add

diff  --git a/llvm/test/Transforms/PGOProfile/single_bb.ll b/llvm/test/Transforms/PGOProfile/single_bb.ll
index ed4267e5b0b08..f60cbfe565e19 100644
--- a/llvm/test/Transforms/PGOProfile/single_bb.ll
+++ b/llvm/test/Transforms/PGOProfile/single_bb.ll
@@ -9,6 +9,6 @@ target triple = "x86_64-unknown-linux-gnu"
 define i32 @single_bb() {
 entry:
 ; GEN: entry:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @__profn_single_bb, i32 0, i32 0), i64 {{[0-9]+}}, i32 1, i32 0)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_single_bb, i64 {{[0-9]+}}, i32 1, i32 0)
   ret i32 0
 }

diff  --git a/llvm/test/Transforms/PGOProfile/switch.ll b/llvm/test/Transforms/PGOProfile/switch.ll
index cfad177af39bf..dac4347a729d0 100644
--- a/llvm/test/Transforms/PGOProfile/switch.ll
+++ b/llvm/test/Transforms/PGOProfile/switch.ll
@@ -15,7 +15,7 @@ define void @test_switch(i32 %i) {
 entry:
 ; GEN: entry:
 ; NOTENTRY-NOT: call void @llvm.instrprof.increment
-; ENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 0)
+; ENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 0)
   switch i32 %i, label %sw.default [
     i32 1, label %sw.bb
     i32 2, label %sw.bb1
@@ -27,23 +27,23 @@ entry:
 
 sw.bb:
 ; GEN: sw.bb:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 2)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 2)
   br label %sw.epilog
 
 sw.bb1:
 ; GEN: sw.bb1:
-; NOTENTRY: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 0)
+; NOTENTRY: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 0)
 ; ENTRY-NOT: call void @llvm.instrprof.increment
   br label %sw.epilog
 
 sw.bb2:
 ; GEN: sw.bb2:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 1)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 1)
   br label %sw.epilog
 
 sw.default:
 ; GEN: sw.default:
-; GEN: call void @llvm.instrprof.increment(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @__profn_test_switch, i32 0, i32 0), i64 {{[0-9]+}}, i32 4, i32 3)
+; GEN: call void @llvm.instrprof.increment(ptr @__profn_test_switch, i64 {{[0-9]+}}, i32 4, i32 3)
   br label %sw.epilog
 
 sw.epilog:

diff  --git a/llvm/test/Transforms/PGOProfile/thinlto_indirect_call_promotion.ll b/llvm/test/Transforms/PGOProfile/thinlto_indirect_call_promotion.ll
index f4340cf4d825e..4cdf732d7d3eb 100644
--- a/llvm/test/Transforms/PGOProfile/thinlto_indirect_call_promotion.ll
+++ b/llvm/test/Transforms/PGOProfile/thinlto_indirect_call_promotion.ll
@@ -15,15 +15,15 @@
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
- at foo = external local_unnamed_addr global void ()*, align 8
- at bar = external local_unnamed_addr global void ()*, align 8
+ at foo = external local_unnamed_addr global ptr, align 8
+ at bar = external local_unnamed_addr global ptr, align 8
 
 define i32 @main() local_unnamed_addr {
 entry:
-  %0 = load void ()*, void ()** @foo, align 8
+  %0 = load ptr, ptr @foo, align 8
 ; ICALL-PROM:   br i1 %{{[0-9]+}}, label %if.true.direct_targ, label %if.false.orig_indirect, !prof [[BRANCH_WEIGHT:![0-9]+]]
   tail call void %0(), !prof !1
-  %1 = load void ()*, void ()** @bar, align 8
+  %1 = load ptr, ptr @bar, align 8
 ; ICALL-PROM:   br i1 %{{[0-9]+}}, label %if.true.direct_targ1, label %if.false.orig_indirect2, !prof [[BRANCH_WEIGHT:![0-9]+]]
   tail call void %1(), !prof !2
   ret i32 0

diff  --git a/llvm/test/Transforms/PGOProfile/thinlto_samplepgo_icp2.ll b/llvm/test/Transforms/PGOProfile/thinlto_samplepgo_icp2.ll
index 1271b7780e495..5f611008b2171 100644
--- a/llvm/test/Transforms/PGOProfile/thinlto_samplepgo_icp2.ll
+++ b/llvm/test/Transforms/PGOProfile/thinlto_samplepgo_icp2.ll
@@ -27,12 +27,12 @@
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
- at fptr = local_unnamed_addr global void ()* null, align 8
+ at fptr = local_unnamed_addr global ptr null, align 8
 
 ; Function Attrs: norecurse uwtable
 define i32 @main() local_unnamed_addr #0 !prof !34 {
 entry:
-  %0 = load void ()*, void ()** @fptr, align 8
+  %0 = load ptr, ptr @fptr, align 8
 ; ICALL-PROM:   br i1 %{{[0-9]+}}, label %if.true.direct_targ, label %if.false.orig_indirect
   tail call void %0(), !prof !40
   ret i32 0

diff  --git a/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll b/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll
index 98a588a09b6be..02ca48effbff3 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll
@@ -14,8 +14,8 @@ define i32 @f(i32 %c) {
 ; CHECK-NEXT:    br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]]
 ; CHECK:       switch.lookup:
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[SWITCH_TABLEIDX]] to i64
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table.f, i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [7 x i32], ptr @switch.table.f, i64 0, i64 [[TMP1]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
 ; CHECK-NEXT:    br label [[RETURN]]
 ; CHECK:       return:
 ; CHECK-NEXT:    [[R:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 15, [[ENTRY:%.*]] ]

diff  --git a/llvm/test/Transforms/RewriteStatepointsForGC/call-gc-result.ll b/llvm/test/Transforms/RewriteStatepointsForGC/call-gc-result.ll
index a10dbda4fb5f7..d898cf2a4d37c 100644
--- a/llvm/test/Transforms/RewriteStatepointsForGC/call-gc-result.ll
+++ b/llvm/test/Transforms/RewriteStatepointsForGC/call-gc-result.ll
@@ -21,7 +21,7 @@ define i32 @test1(i1 %cond, i32 %a) gc "statepoint-example" {
 ; CHECK-NEXT:    br label [[MERGE]]
 ; CHECK:       merge:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[A]], [[BRANCH2]] ], [ [[B]], [[BRANCH1]] ]
-; CHECK-NEXT:    [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, i32 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i32f(i64 2882400000, i32 0, i32 ()* elementtype(i32 ()) @foo, i32 0, i32 0, i32 0, i32 0)
+; CHECK-NEXT:    [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(i32 ()) @foo, i32 0, i32 0, i32 0, i32 0)
 ; CHECK-NEXT:    [[RET1:%.*]] = call i32 @llvm.experimental.gc.result.i32(token [[STATEPOINT_TOKEN]])
 ; CHECK-NEXT:    ret i32 [[RET1]]
 ;

diff  --git a/llvm/test/Transforms/RewriteStatepointsForGC/deopt-intrinsic-cconv.ll b/llvm/test/Transforms/RewriteStatepointsForGC/deopt-intrinsic-cconv.ll
index 6be0b41b1897e..72d6dd49935bb 100644
--- a/llvm/test/Transforms/RewriteStatepointsForGC/deopt-intrinsic-cconv.ll
+++ b/llvm/test/Transforms/RewriteStatepointsForGC/deopt-intrinsic-cconv.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -rewrite-statepoints-for-gc -S < %s | FileCheck %s
 ; RUN: opt -passes=rewrite-statepoints-for-gc -S < %s | FileCheck %s
 
@@ -8,8 +9,10 @@ declare cc42 double @llvm.experimental.deoptimize.f64(...)
 
 define double @caller_3() gc "statepoint-example" {
 ; CHECK-LABEL: @caller_3(
-; CHECK: call cc42 token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint
-; CHECK:  unreachable
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[SAFEPOINT_TOKEN:%.*]] = call cc42 token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @__llvm_deoptimize, i32 0, i32 0, i32 0, i32 0) [ "deopt"() ]
+; CHECK-NEXT:    unreachable
+;
 
 entry:
   %val = call cc42 double(...) @llvm.experimental.deoptimize.f64() [ "deopt"() ]

diff  --git a/llvm/test/Transforms/RewriteStatepointsForGC/deopt-lowering-attrs.ll b/llvm/test/Transforms/RewriteStatepointsForGC/deopt-lowering-attrs.ll
index 5fce7f7a8692d..532f4cc620c97 100644
--- a/llvm/test/Transforms/RewriteStatepointsForGC/deopt-lowering-attrs.ll
+++ b/llvm/test/Transforms/RewriteStatepointsForGC/deopt-lowering-attrs.ll
@@ -14,9 +14,9 @@ declare void @baz() "deopt-lowering"="live-through"
 define void @test1() gc "statepoint-example" {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 57) ]
-; CHECK-NEXT:    [[STATEPOINT_TOKEN1:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @bar, i32 0, i32 2, i32 0, i32 0) [ "deopt"(i32 42) ]
-; CHECK-NEXT:    [[STATEPOINT_TOKEN2:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @baz, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 13) ]
+; CHECK-NEXT:    [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 57) ]
+; CHECK-NEXT:    [[STATEPOINT_TOKEN1:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @bar, i32 0, i32 2, i32 0, i32 0) [ "deopt"(i32 42) ]
+; CHECK-NEXT:    [[STATEPOINT_TOKEN2:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @baz, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 13) ]
 ; CHECK-NEXT:    ret void
 ;
 
@@ -31,7 +31,7 @@ entry:
 define void @test2() gc "statepoint-example" {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @foo, i32 0, i32 2, i32 0, i32 0) #[[ATTR0:[0-9]+]] [ "deopt"(i32 57) ]
+; CHECK-NEXT:    [[STATEPOINT_TOKEN:%.*]] = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @foo, i32 0, i32 2, i32 0, i32 0) #[[ATTR0:[0-9]+]] [ "deopt"(i32 57) ]
 ; CHECK-NEXT:    ret void
 ;
 

diff  --git a/llvm/test/Transforms/SimplifyCFG/X86/disable-lookup-table.ll b/llvm/test/Transforms/SimplifyCFG/X86/disable-lookup-table.ll
index a1ce78b1bdd17..f53267e5f97fe 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/disable-lookup-table.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/disable-lookup-table.ll
@@ -50,8 +50,8 @@ define i32 @bar(i32 %c) {
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp ult i32 [[SWITCH_TABLEIDX]], 4
 ; CHECK-NEXT:    br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]]
 ; CHECK:       switch.lookup:
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.bar, i32 0, i32 [[SWITCH_TABLEIDX]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], ptr @switch.table.bar, i32 0, i32 [[SWITCH_TABLEIDX]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
 ; CHECK-NEXT:    br label [[RETURN]]
 ; CHECK:       return:
 ; CHECK-NEXT:    [[RETVAL_0:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 15, [[ENTRY:%.*]] ]

diff  --git a/llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
index 8adb5ab000f8a..f7e427c625982 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
@@ -11,8 +11,8 @@ define i64 @test(i3 %arg) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SWITCH_TABLEIDX:%.*]] = sub i3 [[ARG:%.*]], -4
 ; CHECK-NEXT:    [[SWITCH_TABLEIDX_ZEXT:%.*]] = zext i3 [[SWITCH_TABLEIDX]] to i4
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* @switch.table.test, i32 0, i4 [[SWITCH_TABLEIDX_ZEXT]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i64, i64* [[SWITCH_GEP]]
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [8 x i64], ptr @switch.table.test, i32 0, i4 [[SWITCH_TABLEIDX_ZEXT]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i64, ptr [[SWITCH_GEP]], align 8
 ; CHECK-NEXT:    [[V3:%.*]] = add i64 [[SWITCH_LOAD]], 0
 ; CHECK-NEXT:    ret i64 [[V3]]
 ;

diff  --git a/llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
index c2f3b307e28c0..7f09743397d79 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
@@ -11,8 +11,8 @@ define i64 @_TFO6reduce1E5toRawfS0_FT_Si(i2) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SWITCH_TABLEIDX:%.*]] = sub i2 [[TMP0:%.*]], -2
 ; CHECK-NEXT:    [[SWITCH_TABLEIDX_ZEXT:%.*]] = zext i2 [[SWITCH_TABLEIDX]] to i3
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* @switch.table._TFO6reduce1E5toRawfS0_FT_Si, i32 0, i3 [[SWITCH_TABLEIDX_ZEXT]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i64, i64* [[SWITCH_GEP]]
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i64], ptr @switch.table._TFO6reduce1E5toRawfS0_FT_Si, i32 0, i3 [[SWITCH_TABLEIDX_ZEXT]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i64, ptr [[SWITCH_GEP]], align 8
 ; CHECK-NEXT:    ret i64 [[SWITCH_LOAD]]
 ;
 entry:

diff  --git a/llvm/test/Transforms/SimplifyCFG/X86/switch-to-lookup-large-types.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch-to-lookup-large-types.ll
index 31c020f8523d0..70725468a1114 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/switch-to-lookup-large-types.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch-to-lookup-large-types.ll
@@ -15,8 +15,8 @@ define i8 @switch_to_lookup_i64(i128 %x){
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i8 [ [[SWITCH_LOAD:%.*]], [[SWITCH_LOOKUP]] ], [ 10, [[START:%.*]] ]
 ; CHECK-NEXT:    ret i8 [[COMMON_RET_OP]]
 ; CHECK:       switch.lookup:
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i8], [3 x i8]* @switch.table.switch_to_lookup_i64, i32 0, i128 [[X]]
-; CHECK-NEXT:    [[SWITCH_LOAD]] = load i8, i8* [[SWITCH_GEP]], align 1
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i8], ptr @switch.table.switch_to_lookup_i64, i32 0, i128 [[X]]
+; CHECK-NEXT:    [[SWITCH_LOAD]] = load i8, ptr [[SWITCH_GEP]], align 1
 ; CHECK-NEXT:    br label [[COMMON_RET]]
 ;
 start:
@@ -49,8 +49,8 @@ define i8 @switch_to_lookup_i128(i128 %x){
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i8 [ [[SWITCH_LOAD:%.*]], [[SWITCH_LOOKUP]] ], [ 10, [[START:%.*]] ]
 ; CHECK-NEXT:    ret i8 [[COMMON_RET_OP]]
 ; CHECK:       switch.lookup:
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i8], [3 x i8]* @switch.table.switch_to_lookup_i128, i32 0, i128 [[X]]
-; CHECK-NEXT:    [[SWITCH_LOAD]] = load i8, i8* [[SWITCH_GEP]], align 1
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i8], ptr @switch.table.switch_to_lookup_i128, i32 0, i128 [[X]]
+; CHECK-NEXT:    [[SWITCH_LOAD]] = load i8, ptr [[SWITCH_GEP]], align 1
 ; CHECK-NEXT:    br label [[COMMON_RET]]
 ;
 start:

diff  --git a/llvm/test/Transforms/SimplifyCFG/rangereduce.ll b/llvm/test/Transforms/SimplifyCFG/rangereduce.ll
index 4e948cdef0afe..943b746ed3a0e 100644
--- a/llvm/test/Transforms/SimplifyCFG/rangereduce.ll
+++ b/llvm/test/Transforms/SimplifyCFG/rangereduce.ll
@@ -13,8 +13,8 @@ define i32 @test1(i32 %a) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP4]], 4
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
 ; CHECK:       switch.lookup:
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.test1, i32 0, i32 [[TMP4]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], ptr @switch.table.test1, i32 0, i32 [[TMP4]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
 ; CHECK-NEXT:    br label [[COMMON_RET]]
 ; CHECK:       common.ret:
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
@@ -82,8 +82,8 @@ define i32 @test3(i32 %a) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[SWITCH_TABLEIDX]], 3
 ; CHECK-NEXT:    br i1 [[TMP1]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
 ; CHECK:       switch.lookup:
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* @switch.table.test3, i32 0, i32 [[SWITCH_TABLEIDX]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [3 x i32], ptr @switch.table.test3, i32 0, i32 [[SWITCH_TABLEIDX]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
 ; CHECK-NEXT:    br label [[COMMON_RET]]
 ; CHECK:       common.ret:
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
@@ -189,8 +189,8 @@ define i32 @test6(i32 %a) optsize {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP4]], 4
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
 ; CHECK:       switch.lookup:
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table.test6, i32 0, i32 [[TMP4]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], ptr @switch.table.test6, i32 0, i32 [[TMP4]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
 ; CHECK-NEXT:    br label [[COMMON_RET]]
 ; CHECK:       common.ret:
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
@@ -256,8 +256,8 @@ define i32 @test8(i32 %a) optsize {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP4]], 5
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
 ; CHECK:       switch.lookup:
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* @switch.table.test8, i32 0, i32 [[TMP4]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [5 x i32], ptr @switch.table.test8, i32 0, i32 [[TMP4]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
 ; CHECK-NEXT:    br label [[COMMON_RET]]
 ; CHECK:       common.ret:
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]
@@ -290,8 +290,8 @@ define i32 @test9(i32 %a) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP4]], 8
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]]
 ; CHECK:       switch.lookup:
-; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [8 x i32], [8 x i32]* @switch.table.test9, i32 0, i32 [[TMP4]]
-; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4
+; CHECK-NEXT:    [[SWITCH_GEP:%.*]] = getelementptr inbounds [8 x i32], ptr @switch.table.test9, i32 0, i32 [[TMP4]]
+; CHECK-NEXT:    [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
 ; CHECK-NEXT:    br label [[COMMON_RET]]
 ; CHECK:       common.ret:
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ 8867, [[TMP0:%.*]] ]

diff  --git a/llvm/test/Transforms/Util/add-TLI-mappings.ll b/llvm/test/Transforms/Util/add-TLI-mappings.ll
index e8c83c4d9bd1f..8e04c22bfdc3e 100644
--- a/llvm/test/Transforms/Util/add-TLI-mappings.ll
+++ b/llvm/test/Transforms/Util/add-TLI-mappings.ll
@@ -11,21 +11,21 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
 ; COMMON-LABEL: @llvm.compiler.used = appending global
-; SVML-SAME:        [6 x i8*] [
-; SVML-SAME:          i8* bitcast (<2 x double> (<2 x double>)* @__svml_sin2 to i8*),
-; SVML-SAME:          i8* bitcast (<4 x double> (<4 x double>)* @__svml_sin4 to i8*),
-; SVML-SAME:          i8* bitcast (<8 x double> (<8 x double>)* @__svml_sin8 to i8*),
-; SVML-SAME:          i8* bitcast (<4 x float> (<4 x float>)* @__svml_log10f4 to i8*),
-; SVML-SAME:          i8* bitcast (<8 x float> (<8 x float>)* @__svml_log10f8 to i8*),
-; SVML-SAME:          i8* bitcast (<16 x float> (<16 x float>)* @__svml_log10f16 to i8*)
-; MASSV-SAME:       [2 x i8*] [
-; MASSV-SAME:         i8* bitcast (<2 x double> (<2 x double>)* @__sind2 to i8*),
-; MASSV-SAME:         i8* bitcast (<4 x float> (<4 x float>)* @__log10f4 to i8*)
-; ACCELERATE-SAME:  [1 x i8*] [
-; ACCELERATE-SAME:    i8* bitcast (<4 x float> (<4 x float>)* @vlog10f to i8*)
-; LIBMVEC-X86-SAME: [2 x i8*] [
-; LIBMVEC-X86-SAME:   i8* bitcast (<2 x double> (<2 x double>)* @_ZGVbN2v_sin to i8*),
-; LIBMVEC-X86-SAME:   i8* bitcast (<4 x double> (<4 x double>)* @_ZGVdN4v_sin to i8*)
+; SVML-SAME:        [6 x ptr] [
+; SVML-SAME:          ptr @__svml_sin2,
+; SVML-SAME:          ptr @__svml_sin4,
+; SVML-SAME:          ptr @__svml_sin8,
+; SVML-SAME:          ptr @__svml_log10f4,
+; SVML-SAME:          ptr @__svml_log10f8,
+; SVML-SAME:          ptr @__svml_log10f16
+; MASSV-SAME:       [2 x ptr] [
+; MASSV-SAME:         ptr @__sind2,
+; MASSV-SAME:         ptr @__log10f4
+; ACCELERATE-SAME:  [1 x ptr] [
+; ACCELERATE-SAME:    ptr @vlog10f
+; LIBMVEC-X86-SAME: [2 x ptr] [
+; LIBMVEC-X86-SAME:   ptr @_ZGVbN2v_sin,
+; LIBMVEC-X86-SAME:   ptr @_ZGVdN4v_sin
 ; COMMON-SAME:      ], section "llvm.metadata"
 
 define double @sin_f64(double %in) {

diff  --git a/llvm/test/Verifier/byval-1.ll b/llvm/test/Verifier/byval-1.ll
index 6d0c46a79e0a2..0d303fc9eb851 100644
--- a/llvm/test/Verifier/byval-1.ll
+++ b/llvm/test/Verifier/byval-1.ll
@@ -1,5 +1,5 @@
 ; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s
 
 ; CHECK: Attribute 'byval(i32)' applied to incompatible type!
-; CHECK-NEXT: void (i32)* @h
+; CHECK-NEXT: ptr @h
 declare void @h(i32 byval(i32) %num)

diff  --git a/llvm/test/Verifier/get-active-lane-mask.ll b/llvm/test/Verifier/get-active-lane-mask.ll
index c637916faccfc..141476e6f83d9 100644
--- a/llvm/test/Verifier/get-active-lane-mask.ll
+++ b/llvm/test/Verifier/get-active-lane-mask.ll
@@ -14,7 +14,7 @@ declare i32 @llvm.get.active.lane.mask.i32.i32(i32, i32)
 
 define i32 @t2(i32 %IV, i32 %TC) {
 ; CHECK:      Intrinsic has incorrect return type!
-; CHECK-NEXT: i32 (i32, i32)* @llvm.get.active.lane.mask.i32.i32
+; CHECK-NEXT: ptr @llvm.get.active.lane.mask.i32.i32
 
   %res = call i32 @llvm.get.active.lane.mask.i32.i32(i32 %IV, i32 %TC)
   ret i32 %res

diff  --git a/llvm/test/Verifier/jumptable.ll b/llvm/test/Verifier/jumptable.ll
index 81984eeb187fe..e7b74c310c824 100644
--- a/llvm/test/Verifier/jumptable.ll
+++ b/llvm/test/Verifier/jumptable.ll
@@ -5,4 +5,4 @@ define i32 @f() jumptable {
 }
 
 ; CHECK: Attribute 'jumptable' requires 'unnamed_addr'
-; CHECK: i32 ()* @f
+; CHECK: ptr @f

diff  --git a/llvm/test/Verifier/llvm.compiler_used-invalid-type.ll b/llvm/test/Verifier/llvm.compiler_used-invalid-type.ll
index ef533b5bf783c..c699a0e45871e 100644
--- a/llvm/test/Verifier/llvm.compiler_used-invalid-type.ll
+++ b/llvm/test/Verifier/llvm.compiler_used-invalid-type.ll
@@ -3,4 +3,4 @@
 @llvm.compiler.used = appending global [1 x i32] [i32 0], section "llvm.metadata"
 
 ; CHECK:       wrong type for intrinsic global variable
-; CHECK-NEXT: [1 x i32]* @llvm.compiler.used
+; CHECK-NEXT: ptr @llvm.compiler.used

diff  --git a/llvm/test/Verifier/llvm.used-invalid-type.ll b/llvm/test/Verifier/llvm.used-invalid-type.ll
index 2de5c86fce3f7..ca97082ed5835 100644
--- a/llvm/test/Verifier/llvm.used-invalid-type.ll
+++ b/llvm/test/Verifier/llvm.used-invalid-type.ll
@@ -3,4 +3,4 @@
 @llvm.used = appending global [1 x i32] [i32 0], section "llvm.metadata"
 
 ; CHECK:       wrong type for intrinsic global variable
-; CHECK-NEXT: [1 x i32]* @llvm.used
+; CHECK-NEXT: ptr @llvm.used

diff  --git a/llvm/test/Verifier/llvm.used-invalid-type2.ll b/llvm/test/Verifier/llvm.used-invalid-type2.ll
index 4bd0aa455645c..ce1a2fbd49bd5 100644
--- a/llvm/test/Verifier/llvm.used-invalid-type2.ll
+++ b/llvm/test/Verifier/llvm.used-invalid-type2.ll
@@ -2,4 +2,4 @@
 @llvm.used = appending global i32 0, section "llvm.metadata"
 
 ; CHECK: Only global arrays can have appending linkage!
-; CHECK-NEXT: i32* @llvm.used
+; CHECK-NEXT: ptr @llvm.used

diff  --git a/llvm/test/Verifier/metadata-function-dbg.ll b/llvm/test/Verifier/metadata-function-dbg.ll
index c33299551ecb6..23ac6f31c9e1a 100644
--- a/llvm/test/Verifier/metadata-function-dbg.ll
+++ b/llvm/test/Verifier/metadata-function-dbg.ll
@@ -23,7 +23,7 @@ define void @f4() !dbg !4 {
 
 ; CHECK-NOT:  !dbg
 ; CHECK:      function !dbg attachment must be a subprogram
-; CHECK-NEXT: void ()* @bar
+; CHECK-NEXT: ptr @bar
 ; CHECK-NEXT: !{{[0-9]+}} = !{}
 define void @bar() !dbg !3 {
   unreachable

diff  --git a/llvm/test/tools/llvm-extract/extract-blocks-with-groups.ll b/llvm/test/tools/llvm-extract/extract-blocks-with-groups.ll
index 90c92aa61fd8f..057e70008ff96 100644
--- a/llvm/test/tools/llvm-extract/extract-blocks-with-groups.ll
+++ b/llvm/test/tools/llvm-extract/extract-blocks-with-groups.ll
@@ -4,7 +4,7 @@
 
 ; The first extracted function is the region composed by the
 ; blocks if, then, and else from foo.
-; CHECK: define dso_local void @foo.if.split(i32 %arg1, i32 %arg, i32* %tmp.0.ce.out) {
+; CHECK: define dso_local void @foo.if.split(i32 %arg1, i32 %arg, ptr %tmp.0.ce.out) {
 ; CHECK: newFuncRoot:
 ; CHECK:   br label %if.split
 ;
@@ -27,7 +27,7 @@
 ;
 ; CHECK: end.split:                                        ; preds = %then, %else
 ; CHECK:   %tmp.0.ce = phi i32 [ %tmp13, %then ], [ %tmp25, %else ]
-; CHECK:   store i32 %tmp.0.ce, i32* %tmp.0.ce.out
+; CHECK:   store i32 %tmp.0.ce, ptr %tmp.0.ce.out
 ; CHECK:   br label %end.exitStub
 ;
 ; CHECK: end.exitStub:                                     ; preds = %end.split
@@ -36,7 +36,7 @@
 
 ; The second extracted function is the region composed by the blocks
 ; bb14 and bb20 from bar.
-; CHECK: define dso_local i1 @bar.bb14(i32 %arg1, i32 %arg, i32* %tmp25.out) {
+; CHECK: define dso_local i1 @bar.bb14(i32 %arg1, i32 %arg, ptr %tmp25.out) {
 ; CHECK: newFuncRoot:
 ; CHECK:   br label %bb14
 ;
@@ -49,7 +49,7 @@
 ; CHECK:   %tmp22 = mul nsw i32 %arg, 3
 ; CHECK:   %tmp24 = sdiv i32 %arg1, 6
 ; CHECK:   %tmp25 = add nsw i32 %tmp24, %tmp22
-; CHECK:   store i32 %tmp25, i32* %tmp25.out
+; CHECK:   store i32 %tmp25, ptr %tmp25.out
 ; CHECK:   br label %bb30.exitStub
 ;
 ; CHECK: bb26.exitStub:                                    ; preds = %bb14

diff  --git a/llvm/test/tools/llvm-link/archive-only-needed.ll b/llvm/test/tools/llvm-link/archive-only-needed.ll
index d997e6c71b9c3..a128d3beae6f6 100644
--- a/llvm/test/tools/llvm-link/archive-only-needed.ll
+++ b/llvm/test/tools/llvm-link/archive-only-needed.ll
@@ -11,5 +11,5 @@
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
- at i = external global i8*
- at llvm.used = appending global [1 x i8*] [i8* bitcast (i8** @i to i8*)], section "llvm.metadata"
+ at i = external global ptr
+ at llvm.used = appending global [1 x ptr] [ptr @i], section "llvm.metadata"

diff  --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
index 331f6a04850c2..9abce4443c639 100644
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -158,6 +158,7 @@ static omp::ScheduleKind getSchedKind(omp::OMPScheduleType SchedType) {
 class OpenMPIRBuilderTest : public testing::Test {
 protected:
   void SetUp() override {
+    Ctx.setOpaquePointers(false); // TODO: Update tests for opaque pointers.
     M.reset(new Module("MyModule", Ctx));
     FunctionType *FTy =
         FunctionType::get(Type::getVoidTy(Ctx), {Type::getInt32Ty(Ctx)},

diff  --git a/llvm/unittests/IR/ConstantsTest.cpp b/llvm/unittests/IR/ConstantsTest.cpp
index b764af6cafae1..c72c05a9cdd7a 100644
--- a/llvm/unittests/IR/ConstantsTest.cpp
+++ b/llvm/unittests/IR/ConstantsTest.cpp
@@ -233,13 +233,13 @@ TEST(ConstantsTest, AsInstructionsTest) {
   Constant *Undef64 = UndefValue::get(Int64Ty);
   Constant *PoisonV16 = PoisonValue::get(P6->getType());
 
-#define P0STR "ptrtoint (i32** @dummy to i32)"
-#define P1STR "uitofp (i32 ptrtoint (i32** @dummy to i32) to float)"
-#define P2STR "uitofp (i32 ptrtoint (i32** @dummy to i32) to double)"
-#define P3STR "ptrtoint (i32** @dummy to i1)"
-#define P4STR "ptrtoint (i32** @dummy2 to i32)"
-#define P5STR "uitofp (i32 ptrtoint (i32** @dummy2 to i32) to float)"
-#define P6STR "bitcast (i32 ptrtoint (i32** @dummy2 to i32) to <2 x i16>)"
+#define P0STR "ptrtoint (ptr @dummy to i32)"
+#define P1STR "uitofp (i32 ptrtoint (ptr @dummy to i32) to float)"
+#define P2STR "uitofp (i32 ptrtoint (ptr @dummy to i32) to double)"
+#define P3STR "ptrtoint (ptr @dummy to i1)"
+#define P4STR "ptrtoint (ptr @dummy2 to i32)"
+#define P5STR "uitofp (i32 ptrtoint (ptr @dummy2 to i32) to float)"
+#define P6STR "bitcast (i32 ptrtoint (ptr @dummy2 to i32) to <2 x i16>)"
 
   CHECK(ConstantExpr::getNeg(P0), "sub i32 0, " P0STR);
   CHECK(ConstantExpr::getFNeg(P1), "fneg float " P1STR);
@@ -298,7 +298,7 @@ TEST(ConstantsTest, AsInstructionsTest) {
   //      "getelementptr i32*, i32** @dummy, i32 1");
   CHECK(ConstantExpr::getInBoundsGetElementPtr(PointerType::getUnqual(Int32Ty),
                                                Global, V),
-        "getelementptr inbounds i32*, i32** @dummy, i32 1");
+        "getelementptr inbounds ptr, ptr @dummy, i32 1");
 
   CHECK(ConstantExpr::getExtractElement(P6, One),
         "extractelement <2 x i16> " P6STR ", i32 1");

diff  --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index c177ed6358bcd..a0836c6e75e31 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -436,7 +436,7 @@ TEST_F(IRBuilderTest, Lifetime) {
   EXPECT_EQ(Start3->getArgOperand(0), Builder.getInt64(100));
 
   EXPECT_EQ(Start1->getArgOperand(1), Var1);
-  EXPECT_NE(Start2->getArgOperand(1), Var2);
+  EXPECT_EQ(Start2->getArgOperand(1)->stripPointerCasts(), Var2);
   EXPECT_EQ(Start3->getArgOperand(1), Var3);
 
   Value *End1 = Builder.CreateLifetimeEnd(Var1);

diff  --git a/llvm/unittests/IR/PatternMatch.cpp b/llvm/unittests/IR/PatternMatch.cpp
index 58d364f11836d..a95e2df1eb2c3 100644
--- a/llvm/unittests/IR/PatternMatch.cpp
+++ b/llvm/unittests/IR/PatternMatch.cpp
@@ -1717,14 +1717,15 @@ TEST_F(PatternMatchTest, VScale) {
   Value *PtrToInt = IRB.CreatePtrToInt(GEP, DL.getIntPtrType(GEP->getType()));
   EXPECT_TRUE(match(PtrToInt, m_VScale(DL)));
 
-  // Prior to this patch, this case would cause assertion failures when attempting to match m_VScale
+  // This used to cause assertion failures when attempting to match m_VScale.
+  // With opaque pointers the bitcast is no longer present.
   Type *VecTy2 = ScalableVectorType::get(IRB.getInt8Ty(), 2);
   Value *NullPtrVec2 = Constant::getNullValue(VecTy2->getPointerTo());
   Value *BitCast = IRB.CreateBitCast(NullPtrVec2, VecPtrTy);
   Value *GEP2 = IRB.CreateGEP(VecTy, BitCast, IRB.getInt64(1));
   Value *PtrToInt2 =
       IRB.CreatePtrToInt(GEP2, DL.getIntPtrType(GEP2->getType()));
-  EXPECT_FALSE(match(PtrToInt2, m_VScale(DL)));
+  EXPECT_TRUE(match(PtrToInt2, m_VScale(DL)));
 }
 
 TEST_F(PatternMatchTest, NotForbidUndef) {

diff  --git a/llvm/unittests/IR/TypesTest.cpp b/llvm/unittests/IR/TypesTest.cpp
index d8b605fb6a581..2ad43045371bf 100644
--- a/llvm/unittests/IR/TypesTest.cpp
+++ b/llvm/unittests/IR/TypesTest.cpp
@@ -48,6 +48,7 @@ TEST(TypesTest, CopyPointerType) {
   EXPECT_TRUE(P1C0->isOpaque());
 
   LLVMContext CTypedPointers;
+  CTypedPointers.setOpaquePointers(false);
   Type *Int8 = Type::getInt8Ty(CTypedPointers);
   PointerType *P2 = PointerType::get(Int8, 1);
   EXPECT_FALSE(P2->isOpaque());

diff  --git a/llvm/unittests/IR/ValueTest.cpp b/llvm/unittests/IR/ValueTest.cpp
index 8a0fba836dcb9..0ab326d519a95 100644
--- a/llvm/unittests/IR/ValueTest.cpp
+++ b/llvm/unittests/IR/ValueTest.cpp
@@ -86,7 +86,6 @@ TEST(GlobalTest, CreateAddressSpace) {
   // Make sure the address space isn't dropped when returning this.
   Constant *DummyCast1 = M->getOrInsertGlobal("dummy_cast", Int8Ty);
   EXPECT_EQ(1u, DummyCast1->getType()->getPointerAddressSpace());
-  EXPECT_NE(DummyCast0, DummyCast1) << *DummyCast1;
 }
 
 #ifdef GTEST_HAS_DEATH_TEST
@@ -183,8 +182,8 @@ TEST(ValueTest, printSlots) {
   CHECK_PRINT_AS_OPERAND(I1, false, "%1");
   CHECK_PRINT_AS_OPERAND(I0, true, "i32 %0");
   CHECK_PRINT_AS_OPERAND(I1, true, "i32 %1");
-  CHECK_PRINT_AS_OPERAND(G0, true, "%0* @g0");
-  CHECK_PRINT_AS_OPERAND(G1, true, "%1* @g1");
+  CHECK_PRINT_AS_OPERAND(G0, true, "ptr @g0");
+  CHECK_PRINT_AS_OPERAND(G1, true, "ptr @g1");
 #undef CHECK_PRINT_AS_OPERAND
 }
 

diff  --git a/llvm/unittests/IR/VerifierTest.cpp b/llvm/unittests/IR/VerifierTest.cpp
index 76ab52bb8c974..5f34463a345eb 100644
--- a/llvm/unittests/IR/VerifierTest.cpp
+++ b/llvm/unittests/IR/VerifierTest.cpp
@@ -138,15 +138,15 @@ TEST(VerifierTest, CrossModuleRef) {
   EXPECT_TRUE(verifyModule(M2, &ErrorOS));
   EXPECT_TRUE(StringRef(ErrorOS.str())
                   .equals("Global is referenced in a 
diff erent module!\n"
-                          "i32 ()* @foo2\n"
+                          "ptr @foo2\n"
                           "; ModuleID = 'M2'\n"
                           "  %call = call i32 @foo2()\n"
-                          "i32 ()* @foo1\n"
+                          "ptr @foo1\n"
                           "; ModuleID = 'M1'\n"
                           "Global is used by function in a 
diff erent module\n"
-                          "i32 ()* @foo2\n"
+                          "ptr @foo2\n"
                           "; ModuleID = 'M2'\n"
-                          "i32 ()* @foo3\n"
+                          "ptr @foo3\n"
                           "; ModuleID = 'M3'\n"));
 
   Error.clear();
@@ -155,7 +155,7 @@ TEST(VerifierTest, CrossModuleRef) {
       "Referencing function in another module!\n"
       "  %call = call i32 @foo2()\n"
       "; ModuleID = 'M1'\n"
-      "i32 ()* @foo2\n"
+      "ptr @foo2\n"
       "; ModuleID = 'M2'\n"));
 
   Error.clear();

diff  --git a/mlir/test/Target/LLVMIR/amx.mlir b/mlir/test/Target/LLVMIR/amx.mlir
index d1f3cd6ce30ab..4df349b17b0a0 100644
--- a/mlir/test/Target/LLVMIR/amx.mlir
+++ b/mlir/test/Target/LLVMIR/amx.mlir
@@ -1,8 +1,8 @@
 // RUN: mlir-translate --mlir-to-llvmir %s | FileCheck %s
 
-// CHECK-LABEL: define void @target(i8* %0)
+// CHECK-LABEL: define void @target(ptr %0)
 // CHECK: %[[c:.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 16)
-// CHECK: call void @llvm.x86.tilestored64.internal(i16 16, i16 16, i8* %0, i64 32, x86_amx %[[c]]
+// CHECK: call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr %0, i64 32, x86_amx %[[c]]
 llvm.func @target(%ptr: !llvm.ptr<i8>) {
   %c = llvm.mlir.constant(16 : i16) : i16
   %s = llvm.mlir.constant(32 : i64) : i64

diff  --git a/mlir/test/Target/LLVMIR/arm-sve.mlir b/mlir/test/Target/LLVMIR/arm-sve.mlir
index 07adf90e9c21b..999df8079e072 100644
--- a/mlir/test/Target/LLVMIR/arm-sve.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sve.mlir
@@ -242,25 +242,23 @@ llvm.func @memcopy(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
   %17 = llvm.icmp "slt" %16, %arg10 : i64
   llvm.cond_br %17, ^bb2, ^bb3
 ^bb2:
-  // CHECK: extractvalue { float*, float*, i64, [1 x i64], [1 x i64] }
+  // CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
   %18 = llvm.extractvalue %5[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                                 array<1 x i64>,
                                                 array<1 x i64>)>
-  // CHECK: etelementptr float, float*
+  // CHECK: getelementptr float, ptr
   %19 = llvm.getelementptr %18[%16] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  // CHECK: bitcast float* %{{[0-9]+}} to <vscale x 4 x float>*
   %20 = llvm.bitcast %19 : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
-  // CHECK: load <vscale x 4 x float>, <vscale x 4 x float>*
+  // CHECK: load <vscale x 4 x float>, ptr
   %21 = llvm.load %20 : !llvm.ptr<vector<[4]xf32>>
-  // CHECK: extractvalue { float*, float*, i64, [1 x i64], [1 x i64] }
+  // CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
   %22 = llvm.extractvalue %11[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                                  array<1 x i64>,
                                                  array<1 x i64>)>
-  // CHECK: getelementptr float, float* %32
+  // CHECK: getelementptr float, ptr
   %23 = llvm.getelementptr %22[%16] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-  // CHECK: bitcast float* %33 to <vscale x 4 x float>*
   %24 = llvm.bitcast %23 : !llvm.ptr<f32> to !llvm.ptr<vector<[4]xf32>>
-  // CHECK: store <vscale x 4 x float> %{{[0-9]+}}, <vscale x 4 x float>* %{{[0-9]+}}
+  // CHECK: store <vscale x 4 x float> %{{[0-9]+}}, ptr %{{[0-9]+}}
   llvm.store %21, %24 : !llvm.ptr<vector<[4]xf32>>
   %25 = llvm.add %16, %15  : i64
   llvm.br ^bb1(%25 : i64)

diff  --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index 31f6d624bd816..b0a07c1d6c59e 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -13,7 +13,7 @@ llvm.func @intrinsics(%arg0: f32, %arg1: f32, %arg2: vector<8xf32>, %arg3: !llvm
   "llvm.intr.fma"(%arg0, %arg1, %arg0) : (f32, f32, f32) -> f32
   // CHECK: call <8 x float> @llvm.fma.v8f32
   "llvm.intr.fma"(%arg2, %arg2, %arg2) : (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
-  // CHECK: call void @llvm.prefetch.p0i8(i8* %3, i32 0, i32 3, i32 1)
+  // CHECK: call void @llvm.prefetch.p0(ptr %3, i32 0, i32 3, i32 1)
   "llvm.intr.prefetch"(%arg3, %c0, %c3, %c1) : (!llvm.ptr<i8>, i32, i32, i32) -> ()
   llvm.return
 }
@@ -282,11 +282,11 @@ llvm.func @matrix_intrinsics(%A: vector<64 x f32>, %B: vector<48 x f32>,
   // CHECK: call <48 x float> @llvm.matrix.transpose.v48f32(<48 x float> %1, i32 3, i32 16)
   %D = llvm.intr.matrix.transpose %B { rows = 3: i32, columns = 16: i32} :
     vector<48 x f32> into vector<48 x f32>
-  // CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32.i64(float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
+  // CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32.i64(ptr align 4 %2, i64 %3, i1 false, i32 3, i32 16)
   %E = llvm.intr.matrix.column.major.load %ptr, <stride=%stride>
     { isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
     vector<48 x f32> from !llvm.ptr<f32> stride i64
-  // CHECK: call void @llvm.matrix.column.major.store.v48f32.i64(<48 x float> %7, float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
+  // CHECK: call void @llvm.matrix.column.major.store.v48f32.i64(<48 x float> %7, ptr align 4 %2, i64 %3, i1 false, i32 3, i32 16)
   llvm.intr.matrix.column.major.store %E, %ptr, <stride=%stride>
     { isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
     vector<48 x f32> to !llvm.ptr<f32> stride i64
@@ -302,13 +302,13 @@ llvm.func @get_active_lane_mask(%base: i64, %n: i64) -> (vector<7xi1>) {
 
 // CHECK-LABEL: @masked_load_store_intrinsics
 llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr<vector<7xf32>>, %mask: vector<7xi1>) {
-  // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>* %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
+  // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
   %a = llvm.intr.masked.load %A, %mask { alignment = 1: i32} :
     (!llvm.ptr<vector<7xf32>>, vector<7xi1>) -> vector<7xf32>
-  // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>* %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
+  // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
   %b = llvm.intr.masked.load %A, %mask, %a { alignment = 1: i32} :
     (!llvm.ptr<vector<7xf32>>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
-  // CHECK: call void @llvm.masked.store.v7f32.p0v7f32(<7 x float> %{{.*}}, <7 x float>* %0, i32 {{.*}}, <7 x i1> %{{.*}})
+  // CHECK: call void @llvm.masked.store.v7f32.p0(<7 x float> %{{.*}}, ptr %0, i32 {{.*}}, <7 x i1> %{{.*}})
   llvm.intr.masked.store %b, %A, %mask { alignment = 1: i32} :
     vector<7xf32>, vector<7xi1> into !llvm.ptr<vector<7xf32>>
   llvm.return
@@ -316,13 +316,13 @@ llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr<vector<7xf32>>, %mask: vec
 
 // CHECK-LABEL: @masked_gather_scatter_intrinsics
 llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr<f32>>, %mask: vector<7xi1>) {
-  // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
+  // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
   %a = llvm.intr.masked.gather %M, %mask { alignment = 1: i32} :
       (!llvm.vec<7 x ptr<f32>>, vector<7xi1>) -> vector<7xf32>
-  // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
+  // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
   %b = llvm.intr.masked.gather %M, %mask, %a { alignment = 1: i32} :
       (!llvm.vec<7 x ptr<f32>>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
-  // CHECK: call void @llvm.masked.scatter.v7f32.v7p0f32(<7 x float> %{{.*}}, <7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}})
+  // CHECK: call void @llvm.masked.scatter.v7f32.v7p0(<7 x float> %{{.*}}, <7 x ptr> %{{.*}}, i32 1, <7 x i1> %{{.*}})
   llvm.intr.masked.scatter %b, %M, %mask { alignment = 1: i32} :
       vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr<f32>>
   llvm.return
@@ -330,10 +330,10 @@ llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr<f32>>, %mask:
 
 // CHECK-LABEL: @masked_expand_compress_intrinsics
 llvm.func @masked_expand_compress_intrinsics(%ptr: !llvm.ptr<f32>, %mask: vector<7xi1>, %passthru: vector<7xf32>) {
-  // CHECK: call <7 x float> @llvm.masked.expandload.v7f32(float* %{{.*}}, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
+  // CHECK: call <7 x float> @llvm.masked.expandload.v7f32(ptr %{{.*}}, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
   %0 = "llvm.intr.masked.expandload"(%ptr, %mask, %passthru)
     : (!llvm.ptr<f32>, vector<7xi1>, vector<7xf32>) -> (vector<7xf32>)
-  // CHECK: call void @llvm.masked.compressstore.v7f32(<7 x float> %{{.*}}, float* %{{.*}}, <7 x i1> %{{.*}})
+  // CHECK: call void @llvm.masked.compressstore.v7f32(<7 x float> %{{.*}}, ptr %{{.*}}, <7 x i1> %{{.*}})
   "llvm.intr.masked.compressstore"(%0, %ptr, %mask)
     : (vector<7xf32>, !llvm.ptr<f32>, vector<7xi1>) -> ()
   llvm.return
@@ -342,10 +342,10 @@ llvm.func @masked_expand_compress_intrinsics(%ptr: !llvm.ptr<f32>, %mask: vector
 // CHECK-LABEL: @memcpy_test
 llvm.func @memcpy_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
   %i1 = llvm.mlir.constant(false) : i1
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %{{.*}}, i8* %{{.*}}, i32 %{{.*}}, i1 {{.*}})
+  // CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 %{{.*}}, i1 {{.*}})
   "llvm.intr.memcpy"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
   %sz = llvm.mlir.constant(10: i64) : i64
-  // CHECK: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %{{.*}}, i8* %{{.*}}, i64 10, i1 {{.*}})
+  // CHECK: call void @llvm.memcpy.inline.p0.p0.i64(ptr %{{.*}}, ptr %{{.*}}, i64 10, i1 {{.*}})
   "llvm.intr.memcpy.inline"(%arg2, %arg3, %sz, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i64, i1) -> ()
   llvm.return
 }
@@ -353,7 +353,7 @@ llvm.func @memcpy_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
 // CHECK-LABEL: @memmove_test
 llvm.func @memmove_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>) {
   %i1 = llvm.mlir.constant(false) : i1
-  // CHECK: call void @llvm.memmove.p0i8.p0i8.i32(i8* %{{.*}}, i8* %{{.*}}, i32 %{{.*}}, i1 {{.*}})
+  // CHECK: call void @llvm.memmove.p0.p0.i32(ptr %{{.*}}, ptr %{{.*}}, i32 %{{.*}}, i1 {{.*}})
   "llvm.intr.memmove"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
   llvm.return
 }
@@ -361,7 +361,7 @@ llvm.func @memmove_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>)
 // CHECK-LABEL: @memset_test
 llvm.func @memset_test(%arg0: i32, %arg2: !llvm.ptr<i8>, %arg3: i8) {
   %i1 = llvm.mlir.constant(false) : i1
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* %{{.*}}, i8 %{{.*}}, i32 %{{.*}}, i1 {{.*}})
+  // CHECK: call void @llvm.memset.p0.i32(ptr %{{.*}}, i8 %{{.*}}, i32 %{{.*}}, i1 {{.*}})
   "llvm.intr.memset"(%arg2, %arg3, %arg0, %i1) : (!llvm.ptr<i8>, i8, i32, i1) -> ()
   llvm.return
 }
@@ -432,7 +432,7 @@ llvm.func @coro_id(%arg0: i32, %arg1: !llvm.ptr<i8>) {
 llvm.func @coro_begin(%arg0: i32, %arg1: !llvm.ptr<i8>) {
   %null = llvm.mlir.null : !llvm.ptr<i8>
   %token = llvm.intr.coro.id %arg0, %arg1, %arg1, %null : !llvm.token
-  // CHECK: call i8* @llvm.coro.begin
+  // CHECK: call ptr @llvm.coro.begin
   llvm.intr.coro.begin %token, %arg1 : !llvm.ptr<i8>
   llvm.return
 }
@@ -482,7 +482,7 @@ llvm.func @coro_end(%arg0: !llvm.ptr<i8>, %arg1 : i1) {
 llvm.func @coro_free(%arg0: i32, %arg1 : !llvm.ptr<i8>) {
   %null = llvm.mlir.null : !llvm.ptr<i8>
   %token = llvm.intr.coro.id %arg0, %arg1, %arg1, %null : !llvm.token
-  // CHECK: call i8* @llvm.coro.free
+  // CHECK: call ptr @llvm.coro.free
   %0 = llvm.intr.coro.free %token, %arg1 : !llvm.ptr<i8>
   llvm.return
 }
@@ -503,7 +503,7 @@ llvm.func @eh_typeid_for(%arg0 : !llvm.ptr<i8>) {
 
 // CHECK-LABEL: @stack_save
 llvm.func @stack_save() {
-  // CHECK: call i8* @llvm.stacksave
+  // CHECK: call ptr @llvm.stacksave
   %0 = llvm.intr.stacksave : !llvm.ptr<i8>
   llvm.return
 }
@@ -634,16 +634,16 @@ llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>,
   "llvm.intr.vp.merge" (%mask, %A, %B, %evl) :
          (vector<8xi1>, vector<8xi32>, vector<8xi32>, i32) -> vector<8xi32>
 
-  // CHECK: call void @llvm.vp.store.v8i32.p0i32
+  // CHECK: call void @llvm.vp.store.v8i32.p0
   "llvm.intr.vp.store" (%A, %iptr, %mask, %evl) : 
          (vector<8xi32>, !llvm.ptr<i32>, vector<8xi1>, i32) -> ()
-  // CHECK: call <8 x i32> @llvm.vp.load.v8i32.p0i32
+  // CHECK: call <8 x i32> @llvm.vp.load.v8i32.p0
   "llvm.intr.vp.load" (%iptr, %mask, %evl) :
          (!llvm.ptr<i32>, vector<8xi1>, i32) -> vector<8xi32>
-  // CHECK: call void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32
+  // CHECK: call void @llvm.experimental.vp.strided.store.v8i32.p0.i32
   "llvm.intr.experimental.vp.strided.store" (%A, %iptr, %i, %mask, %evl) : 
          (vector<8xi32>, !llvm.ptr<i32>, i32, vector<8xi1>, i32) -> ()
-  // CHECK: call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32
+  // CHECK: call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32
   "llvm.intr.experimental.vp.strided.load" (%iptr, %i, %mask, %evl) :
          (!llvm.ptr<i32>, i32, vector<8xi1>, i32) -> vector<8xi32>
 
@@ -671,10 +671,10 @@ llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>,
   "llvm.intr.vp.fptosi" (%F, %mask, %evl) :
          (vector<8xf64>, vector<8xi1>, i32) -> vector<8xi64>
 
-  // CHECK: call <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0i32
+  // CHECK: call <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0
   "llvm.intr.vp.ptrtoint" (%G, %mask, %evl) :
          (!llvm.vec<8 x !llvm.ptr<i32>>, vector<8xi1>, i32) -> vector<8xi64>
-  // CHECK: call <8 x i32*> @llvm.vp.inttoptr.v8p0i32.v8i64
+  // CHECK: call <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64
   "llvm.intr.vp.inttoptr" (%E, %mask, %evl) :
          (vector<8xi64>, vector<8xi1>, i32) -> !llvm.vec<8 x !llvm.ptr<i32>>
   llvm.return
@@ -685,7 +685,7 @@ llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>,
 // CHECK-DAG: declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #0
 // CHECK-DAG: declare float @llvm.fmuladd.f32(float, float, float)
 // CHECK-DAG: declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>) #0
-// CHECK-DAG: declare void @llvm.prefetch.p0i8(i8* nocapture readonly, i32 immarg, i32 immarg, i32)
+// CHECK-DAG: declare void @llvm.prefetch.p0(ptr nocapture readonly, i32 immarg, i32 immarg, i32)
 // CHECK-DAG: declare float @llvm.exp.f32(float)
 // CHECK-DAG: declare <8 x float> @llvm.exp.v8f32(<8 x float>) #0
 // CHECK-DAG: declare float @llvm.log.f32(float)
@@ -705,17 +705,17 @@ llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>,
 // CHECK-DAG: declare float @llvm.copysign.f32(float, float)
 // CHECK-DAG: declare <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float>, <48 x float>, i32 immarg, i32 immarg, i32 immarg)
 // CHECK-DAG: declare <48 x float> @llvm.matrix.transpose.v48f32(<48 x float>, i32 immarg, i32 immarg)
-// CHECK-DAG: declare <48 x float> @llvm.matrix.column.major.load.v48f32.i64(float* nocapture, i64, i1 immarg, i32 immarg, i32 immarg)
-// CHECK-DAG: declare void @llvm.matrix.column.major.store.v48f32.i64(<48 x float>, float* nocapture writeonly, i64, i1 immarg, i32 immarg, i32 immarg)
+// CHECK-DAG: declare <48 x float> @llvm.matrix.column.major.load.v48f32.i64(ptr nocapture, i64, i1 immarg, i32 immarg, i32 immarg)
+// CHECK-DAG: declare void @llvm.matrix.column.major.store.v48f32.i64(<48 x float>, ptr nocapture writeonly, i64, i1 immarg, i32 immarg, i32 immarg)
 // CHECK-DAG: declare <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64, i64)
-// CHECK-DAG: declare <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>*, i32 immarg, <7 x i1>, <7 x float>)
-// CHECK-DAG: declare void @llvm.masked.store.v7f32.p0v7f32(<7 x float>, <7 x float>*, i32 immarg, <7 x i1>)
-// CHECK-DAG: declare <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*>, i32 immarg, <7 x i1>, <7 x float>)
-// CHECK-DAG: declare void @llvm.masked.scatter.v7f32.v7p0f32(<7 x float>, <7 x float*>, i32 immarg, <7 x i1>)
-// CHECK-DAG: declare <7 x float> @llvm.masked.expandload.v7f32(float*, <7 x i1>, <7 x float>)
-// CHECK-DAG: declare void @llvm.masked.compressstore.v7f32(<7 x float>, float*, <7 x i1>)
-// CHECK-DAG: declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)
-// CHECK-DAG: declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg)
+// CHECK-DAG: declare <7 x float> @llvm.masked.load.v7f32.p0(ptr, i32 immarg, <7 x i1>, <7 x float>)
+// CHECK-DAG: declare void @llvm.masked.store.v7f32.p0(<7 x float>, ptr, i32 immarg, <7 x i1>)
+// CHECK-DAG: declare <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr>, i32 immarg, <7 x i1>, <7 x float>)
+// CHECK-DAG: declare void @llvm.masked.scatter.v7f32.v7p0(<7 x float>, <7 x ptr>, i32 immarg, <7 x i1>)
+// CHECK-DAG: declare <7 x float> @llvm.masked.expandload.v7f32(ptr, <7 x i1>, <7 x float>)
+// CHECK-DAG: declare void @llvm.masked.compressstore.v7f32(<7 x float>, ptr, <7 x i1>)
+// CHECK-DAG: declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
+// CHECK-DAG: declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg)
 // CHECK-DAG: declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
 // CHECK-DAG: declare { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32>, <8 x i32>) #0
 // CHECK-DAG: declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
@@ -726,15 +726,15 @@ llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>,
 // CHECK-DAG: declare { <8 x i32>, <8 x i1> } @llvm.usub.with.overflow.v8i32(<8 x i32>, <8 x i32>) #0
 // CHECK-DAG: declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
 // CHECK-DAG: declare { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32>, <8 x i32>) #0
-// CHECK-DAG: declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
-// CHECK-DAG: declare i8* @llvm.coro.begin(token, i8* writeonly)
+// CHECK-DAG: declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
+// CHECK-DAG: declare ptr @llvm.coro.begin(token, ptr writeonly)
 // CHECK-DAG: declare i64 @llvm.coro.size.i64()
 // CHECK-DAG: declare i32 @llvm.coro.size.i32()
-// CHECK-DAG: declare token @llvm.coro.save(i8*)
+// CHECK-DAG: declare token @llvm.coro.save(ptr)
 // CHECK-DAG: declare i8 @llvm.coro.suspend(token, i1)
-// CHECK-DAG: declare i1 @llvm.coro.end(i8*, i1)
-// CHECK-DAG: declare i8* @llvm.coro.free(token, i8* nocapture readonly)
-// CHECK-DAG: declare void @llvm.coro.resume(i8*)
+// CHECK-DAG: declare i1 @llvm.coro.end(ptr, i1)
+// CHECK-DAG: declare ptr @llvm.coro.free(token, ptr nocapture readonly)
+// CHECK-DAG: declare void @llvm.coro.resume(ptr)
 // CHECK-DAG: declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) #2
 // CHECK-DAG: declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) #2
 // CHECK-DAG: declare <8 x i32> @llvm.vp.mul.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) #2
@@ -770,8 +770,8 @@ llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>,
 // CHECK-DAG: declare float @llvm.vp.reduce.fmin.v8f32(float, <8 x float>, <8 x i1>, i32) #2
 // CHECK-DAG: declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) #2
 // CHECK-DAG: declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) #2
-// CHECK-DAG: declare void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32>, i32* nocapture, i32, <8 x i1>, i32) #4
-// CHECK-DAG: declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* nocapture, i32, <8 x i1>, i32) #3
+// CHECK-DAG: declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr nocapture, i32, <8 x i1>, i32) #4
+// CHECK-DAG: declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr nocapture, i32, <8 x i1>, i32) #3
 // CHECK-DAG: declare <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64>, <8 x i1>, i32) #2
 // CHECK-DAG: declare <8 x i64> @llvm.vp.zext.v8i64.v8i32(<8 x i32>, <8 x i1>, i32) #2
 // CHECK-DAG: declare <8 x i64> @llvm.vp.sext.v8i64.v8i32(<8 x i32>, <8 x i1>, i32) #2
@@ -779,5 +779,5 @@ llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>,
 // CHECK-DAG: declare <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float>, <8 x i1>, i32) #2
 // CHECK-DAG: declare <8 x i64> @llvm.vp.fptoui.v8i64.v8f64(<8 x double>, <8 x i1>, i32) #2
 // CHECK-DAG: declare <8 x i64> @llvm.vp.fptosi.v8i64.v8f64(<8 x double>, <8 x i1>, i32) #2
-// CHECK-DAG: declare <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0i32(<8 x i32*>, <8 x i1>, i32) #2
-// CHECK-DAG: declare <8 x i32*> @llvm.vp.inttoptr.v8p0i32.v8i64(<8 x i64>, <8 x i1>, i32) #2
+// CHECK-DAG: declare <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0(<8 x ptr>, <8 x i1>, i32) #2
+// CHECK-DAG: declare <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64(<8 x i64>, <8 x i1>, i32) #2

diff  --git a/mlir/test/Target/LLVMIR/llvmir-types.mlir b/mlir/test/Target/LLVMIR/llvmir-types.mlir
index da3b395c3ab10..9d972f6fa6b63 100644
--- a/mlir/test/Target/LLVMIR/llvmir-types.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-types.mlir
@@ -39,7 +39,7 @@ llvm.func @f_i32_i32_i32(i32, i32) -> i32
 llvm.func @f_void_variadic(...)
 // CHECK: declare void @f_void_i32_i32_variadic(i32, i32, ...)
 llvm.func @f_void_i32_i32_variadic(i32, i32, ...)
-// CHECK: declare i32 (i32)* @f_f_i32_i32()
+// CHECK: declare ptr @f_f_i32_i32()
 llvm.func @f_f_i32_i32() -> !llvm.ptr<func<i32 (i32)>>
 
 //
@@ -65,21 +65,21 @@ llvm.func @return_i129() -> i129
 // Pointers.
 //
 
-// CHECK: declare i8* @return_pi8()
+// CHECK: declare ptr @return_pi8()
 llvm.func @return_pi8() -> !llvm.ptr<i8>
-// CHECK: declare float* @return_pfloat()
+// CHECK: declare ptr @return_pfloat()
 llvm.func @return_pfloat() -> !llvm.ptr<f32>
-// CHECK: declare i8** @return_ppi8()
+// CHECK: declare ptr @return_ppi8()
 llvm.func @return_ppi8() -> !llvm.ptr<ptr<i8>>
-// CHECK: declare i8***** @return_pppppi8()
+// CHECK: declare ptr @return_pppppi8()
 llvm.func @return_pppppi8() -> !llvm.ptr<ptr<ptr<ptr<ptr<i8>>>>>
-// CHECK: declare i8* @return_pi8_0()
+// CHECK: declare ptr @return_pi8_0()
 llvm.func @return_pi8_0() -> !llvm.ptr<i8, 0>
-// CHECK: declare i8 addrspace(1)* @return_pi8_1()
+// CHECK: declare ptr addrspace(1) @return_pi8_1()
 llvm.func @return_pi8_1() -> !llvm.ptr<i8, 1>
-// CHECK: declare i8 addrspace(42)* @return_pi8_42()
+// CHECK: declare ptr addrspace(42) @return_pi8_42()
 llvm.func @return_pi8_42() -> !llvm.ptr<i8, 42>
-// CHECK: declare i8 addrspace(42)* addrspace(9)* @return_ppi8_42_9()
+// CHECK: declare ptr addrspace(9) @return_ppi8_42_9()
 llvm.func @return_ppi8_42_9() -> !llvm.ptr<ptr<i8, 42>, 9>
 
 //
@@ -96,7 +96,7 @@ llvm.func @return_vs_4_float() -> vector<[4]xf32>
 llvm.func @return_vs_4_i32() -> !llvm.vec<?x4 x i32>
 // CHECK: declare <vscale x 8 x half> @return_vs_8_half()
 llvm.func @return_vs_8_half() -> !llvm.vec<?x8 x f16>
-// CHECK: declare <4 x i8*> @return_v_4_pi8()
+// CHECK: declare <4 x ptr> @return_v_4_pi8()
 llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr<i8>>
 
 //
@@ -107,7 +107,7 @@ llvm.func @return_v_4_pi8() -> !llvm.vec<4xptr<i8>>
 llvm.func @return_a10_i32() -> !llvm.array<10 x i32>
 // CHECK: declare [8 x float] @return_a8_float()
 llvm.func @return_a8_float() -> !llvm.array<8 x f32>
-// CHECK: declare [10 x i32 addrspace(4)*] @return_a10_pi32_4()
+// CHECK: declare [10 x ptr addrspace(4)] @return_a10_pi32_4()
 llvm.func @return_a10_pi32_4() -> !llvm.array<10 x ptr<i32, 4>>
 // CHECK: declare [10 x [4 x float]] @return_a10_a4_float()
 llvm.func @return_a10_a4_float() -> !llvm.array<10 x array<4 x f32>>
@@ -147,16 +147,15 @@ llvm.func @return_sp_s_i32() -> !llvm.struct<packed (struct<(i32)>)>
 
 // CHECK: %empty = type {}
 // CHECK: %opaque = type opaque
-// CHECK: %long = type { i32, { i32, i1 }, float, void ()* }
-// CHECK: %self-recursive = type { %self-recursive* }
+// CHECK: %long = type { i32, { i32, i1 }, float, ptr }
+// CHECK: %self-recursive = type { ptr }
 // CHECK: %unpacked = type { i32 }
 // CHECK: %packed = type <{ i32 }>
 // CHECK: %"name with spaces and !^$@$#" = type <{ i32 }>
-// CHECK: %mutually-a = type { %mutually-b* }
-// CHECK: %mutually-b = type { %mutually-a addrspace(3)* }
+// CHECK: %mutually-a = type { ptr }
+// CHECK: %mutually-b = type { ptr addrspace(3) }
 // CHECK: %struct-of-arrays = type { [10 x i32] }
 // CHECK: %array-of-structs = type { i32 }
-// CHECK: %ptr-to-struct = type { i8 }
 
 // CHECK: declare %empty
 llvm.func @return_s_empty() -> !llvm.struct<"empty", ()>
@@ -182,5 +181,5 @@ llvm.func @return_s_mutually_b() -> !llvm.struct<"mutually-b", (ptr<struct<"mutu
 llvm.func @return_s_struct_of_arrays() -> !llvm.struct<"struct-of-arrays", (array<10 x i32>)>
 // CHECK: declare [10 x %array-of-structs]
 llvm.func @return_s_array_of_structs() -> !llvm.array<10 x struct<"array-of-structs", (i32)>>
-// CHECK: declare %ptr-to-struct*
+// CHECK: declare ptr
 llvm.func @return_s_ptr_to_struct() -> !llvm.ptr<struct<"ptr-to-struct", (i8)>>

diff  --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index b4a2dbcf02d8a..a5b4bd65a58f2 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -45,7 +45,7 @@ llvm.mlir.global external @explicit_undef() : i32 {
   llvm.return %0 : i32
 }
 
-// CHECK: @int_gep = internal constant i32* getelementptr (i32, i32* @i32_global, i32 2)
+// CHECK: @int_gep = internal constant ptr getelementptr (i32, ptr @i32_global, i32 2)
 llvm.mlir.global internal constant @int_gep() : !llvm.ptr<i32> {
   %addr = llvm.mlir.addressof @i32_global : !llvm.ptr<i32>
   %_c0 = llvm.mlir.constant(2: i32) :i32
@@ -147,9 +147,9 @@ llvm.mlir.global internal constant @sectionvar("teststring")  {section = ".mysec
 // inserted before other functions in the module.
 //
 
-// CHECK: declare i8* @malloc(i64)
+// CHECK: declare ptr @malloc(i64)
 llvm.func @malloc(i64) -> !llvm.ptr<i8>
-// CHECK: declare void @free(i8*)
+// CHECK: declare void @free(ptr)
 
 
 //
@@ -167,12 +167,12 @@ llvm.func @empty() {
 // CHECK-LABEL: @global_refs
 llvm.func @global_refs() {
   // Check load from globals.
-  // CHECK: load i32, i32* @i32_global
+  // CHECK: load i32, ptr @i32_global
   %0 = llvm.mlir.addressof @i32_global : !llvm.ptr<i32>
   %1 = llvm.load %0 : !llvm.ptr<i32>
 
   // Check the contracted form of load from array constants.
-  // CHECK: load i8, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @string_const, i64 0, i64 0)
+  // CHECK: load i8, ptr @string_const
   %2 = llvm.mlir.addressof @string_const : !llvm.ptr<array<6 x i8>>
   %c0 = llvm.mlir.constant(0 : index) : i64
   %3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr<array<6 x i8>>, i64, i64) -> !llvm.ptr<i8>
@@ -499,9 +499,8 @@ llvm.func @dso_local_func() attributes {dso_local} {
 
 // CHECK-LABEL: define void @memref_alloc()
 llvm.func @memref_alloc() {
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 400)
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 400)
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr } undef, ptr %{{[0-9]+}}, 0
   %0 = llvm.mlir.constant(10 : index) : i64
   %1 = llvm.mlir.constant(10 : index) : i64
   %2 = llvm.mul %0, %1 : i64
@@ -521,9 +520,8 @@ llvm.func @get_index() -> i64
 // CHECK-LABEL: define void @store_load_static()
 llvm.func @store_load_static() {
 ^bb0:
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 40)
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 40)
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr } undef, ptr %{{[0-9]+}}, 0
   %0 = llvm.mlir.constant(10 : index) : i64
   %1 = llvm.mlir.undef : !llvm.struct<(ptr<f32>)>
   %2 = llvm.mlir.constant(4 : index) : i64
@@ -544,9 +542,9 @@ llvm.func @store_load_static() {
 // CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
   llvm.cond_br %11, ^bb3, ^bb4
 ^bb3:   // pred: ^bb2
-// CHECK: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
+// CHECK: %{{[0-9]+}} = extractvalue { ptr } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: store float 1.000000e+00, ptr %{{[0-9]+}}
   %12 = llvm.mlir.constant(10 : index) : i64
   %13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>)>
   %14 = llvm.getelementptr %13[%10] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
@@ -569,9 +567,9 @@ llvm.func @store_load_static() {
 // CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
   llvm.cond_br %20, ^bb7, ^bb8
 ^bb7:   // pred: ^bb6
-// CHECK:      %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
+// CHECK:      %{{[0-9]+}} = extractvalue { ptr } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
   %21 = llvm.mlir.constant(10 : index) : i64
   %22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>)>
   %23 = llvm.getelementptr %22[%19] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
@@ -589,10 +587,9 @@ llvm.func @store_load_static() {
 // CHECK-LABEL: define void @store_load_dynamic(i64 {{%.*}})
 llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } undef, ptr %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
   %0 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64)>
   %1 = llvm.mlir.constant(4 : index) : i64
   %2 = llvm.mul %arg0, %1 : i64
@@ -613,10 +610,10 @@ llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
   llvm.cond_br %10, ^bb3, ^bb4
 ^bb3:   // pred: ^bb2
-// CHECK:      %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
+// CHECK:      %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: store float 1.000000e+00, ptr %{{[0-9]+}}
   %11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<f32>, i64)>
   %12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>, i64)>
   %13 = llvm.getelementptr %12[%9] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
@@ -638,10 +635,10 @@ llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: br i1 %{{[0-9]+}}, label %{{[0-9]+}}, label %{{[0-9]+}}
   llvm.cond_br %18, ^bb7, ^bb8
 ^bb7:   // pred: ^bb6
-// CHECK:      %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
+// CHECK:      %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
   %19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<f32>, i64)>
   %20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>, i64)>
   %21 = llvm.getelementptr %20[%17] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
@@ -663,11 +660,10 @@ llvm.func @store_load_mixed(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 10
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } undef, float* %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64, i64 } %{{[0-9]+}}, i64 10, 2
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64, i64 } undef, ptr %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64, i64 } %{{[0-9]+}}, i64 10, 2
   %1 = llvm.mlir.constant(2 : index) : i64
   %2 = llvm.mlir.constant(4 : index) : i64
   %3 = llvm.mul %1, %arg0 : i64
@@ -690,17 +686,17 @@ llvm.func @store_load_mixed(%arg0: i64) {
   %17 = llvm.call @get_index() : () -> i64
   %18 = llvm.mlir.constant(4.200000e+01 : f32) : f32
   %19 = llvm.mlir.constant(2 : index) : i64
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 2
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 1, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 2
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
   %20 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<f32>, i64, i64)>
   %21 = llvm.mlir.constant(4 : index) : i64
   %22 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<f32>, i64, i64)>
@@ -713,17 +709,17 @@ llvm.func @store_load_mixed(%arg0: i64) {
   %29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<f32>, i64, i64)>
   %30 = llvm.getelementptr %29[%28] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
   llvm.store %18, %30 : !llvm.ptr<f32>
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 2
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 2
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = load float, ptr %{{[0-9]+}}
   %31 = llvm.mlir.constant(2 : index) : i64
   %32 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<f32>, i64, i64)>
   %33 = llvm.mlir.constant(4 : index) : i64
@@ -741,33 +737,33 @@ llvm.func @store_load_mixed(%arg0: i64) {
   llvm.return
 }
 
-// CHECK-LABEL: define { float*, i64 } @memref_args_rets({ float* } {{%.*}}, { float*, i64 } {{%.*}}, { float*, i64 } {{%.*}})
+// CHECK-LABEL: define { ptr, i64 } @memref_args_rets({ ptr } {{%.*}}, { ptr, i64 } {{%.*}}, { ptr, i64 } {{%.*}})
 llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct<(ptr<f32>, i64)>, %arg2: !llvm.struct<(ptr<f32>, i64)>) -> !llvm.struct<(ptr<f32>, i64)> {
   %0 = llvm.mlir.constant(7 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
   %1 = llvm.call @get_index() : () -> i64
   %2 = llvm.mlir.constant(4.200000e+01 : f32) : f32
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
-// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 7
+// CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
   %3 = llvm.mlir.constant(10 : index) : i64
   %4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr<f32>)>
   %5 = llvm.getelementptr %4[%0] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
   llvm.store %2, %5 : !llvm.ptr<f32>
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
-// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 7
+// CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
   %6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr<f32>, i64)>
   %7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr<f32>, i64)>
   %8 = llvm.getelementptr %7[%0] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
   llvm.store %2, %8 : !llvm.ptr<f32>
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 7, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
-// CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
-// CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
+// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 %{{[0-9]+}}
+// CHECK-NEXT: store float 4.200000e+01, ptr %{{[0-9]+}}
   %9 = llvm.mlir.constant(10 : index) : i64
   %10 = llvm.extractvalue %arg2[1] : !llvm.struct<(ptr<f32>, i64)>
   %11 = llvm.mul %0, %10 : i64
@@ -777,10 +773,9 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct
   llvm.store %2, %14 : !llvm.ptr<f32>
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 10, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
-// CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
-// CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
-// CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
+// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } undef, ptr %{{[0-9]+}}, 0
+// CHECK-NEXT: %{{[0-9]+}} = insertvalue { ptr, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
   %15 = llvm.mlir.constant(10 : index) : i64
   %16 = llvm.mul %15, %1 : i64
   %17 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64)>
@@ -790,20 +785,20 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct
   %21 = llvm.bitcast %20 : !llvm.ptr<i8> to !llvm.ptr<f32>
   %22 = llvm.insertvalue %21, %17[0] : !llvm.struct<(ptr<f32>, i64)>
   %23 = llvm.insertvalue %1, %22[1] : !llvm.struct<(ptr<f32>, i64)>
-// CHECK-NEXT: ret { float*, i64 } %{{[0-9]+}}
+// CHECK-NEXT: ret { ptr, i64 } %{{[0-9]+}}
   llvm.return %23 : !llvm.struct<(ptr<f32>, i64)>
 }
 
 
-// CHECK-LABEL: define i64 @memref_dim({ float*, i64, i64 } {{%.*}})
+// CHECK-LABEL: define i64 @memref_dim({ ptr, i64, i64 } {{%.*}})
 llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<f32>, i64, i64)>) -> i64 {
 // Expecting this to create an LLVM constant.
   %0 = llvm.mlir.constant(42 : index) : i64
-// CHECK-NEXT: %2 = extractvalue { float*, i64, i64 } %0, 1
+// CHECK-NEXT: %2 = extractvalue { ptr, i64, i64 } %0, 1
   %1 = llvm.extractvalue %arg0[1] : !llvm.struct<(ptr<f32>, i64, i64)>
 // Expecting this to create an LLVM constant.
   %2 = llvm.mlir.constant(10 : index) : i64
-// CHECK-NEXT: %3 = extractvalue { float*, i64, i64 } %0, 2
+// CHECK-NEXT: %3 = extractvalue { ptr, i64, i64 } %0, 2
   %3 = llvm.extractvalue %arg0[2] : !llvm.struct<(ptr<f32>, i64, i64)>
 // Checking that the constant for d0 has been created.
 // CHECK-NEXT: %4 = add i64 42, %2
@@ -821,15 +816,15 @@ llvm.func @get_i64() -> i64
 llvm.func @get_f32() -> f32
 llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, i64, i64)>
 
-// CHECK-LABEL: define { i64, float, { float*, i64, i64 } } @multireturn()
+// CHECK-LABEL: define { i64, float, { ptr, i64, i64 } } @multireturn()
 llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)> {
   %0 = llvm.call @get_i64() : () -> i64
   %1 = llvm.call @get_f32() : () -> f32
   %2 = llvm.call @get_memref() : () -> !llvm.struct<(ptr<f32>, i64, i64)>
-// CHECK:        %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } undef, i64 %{{[0-9]+}}, 0
-// CHECK-NEXT:   %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } %{{[0-9]+}}, float %{{[0-9]+}}, 1
-// CHECK-NEXT:   %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } %{{[0-9]+}}, { float*, i64, i64 } %{{[0-9]+}}, 2
-// CHECK-NEXT:   ret { i64, float, { float*, i64, i64 } } %{{[0-9]+}}
+// CHECK:        %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } undef, i64 %{{[0-9]+}}, 0
+// CHECK-NEXT:   %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}, float %{{[0-9]+}}, 1
+// CHECK-NEXT:   %{{[0-9]+}} = insertvalue { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}, { ptr, i64, i64 } %{{[0-9]+}}, 2
+// CHECK-NEXT:   ret { i64, float, { ptr, i64, i64 } } %{{[0-9]+}}
   %3 = llvm.mlir.undef : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
   %4 = llvm.insertvalue %0, %3[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
   %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
@@ -840,10 +835,10 @@ llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>
 
 // CHECK-LABEL: define void @multireturn_caller()
 llvm.func @multireturn_caller() {
-// CHECK-NEXT:   %1 = call { i64, float, { float*, i64, i64 } } @multireturn()
-// CHECK-NEXT:   [[ret0:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 0
-// CHECK-NEXT:   [[ret1:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 1
-// CHECK-NEXT:   [[ret2:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 2
+// CHECK-NEXT:   %1 = call { i64, float, { ptr, i64, i64 } } @multireturn()
+// CHECK-NEXT:   [[ret0:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 0
+// CHECK-NEXT:   [[ret1:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 1
+// CHECK-NEXT:   [[ret2:%[0-9]+]] = extractvalue { i64, float, { ptr, i64, i64 } } %1, 2
   %0 = llvm.call @multireturn() : () -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
   %1 = llvm.extractvalue %0[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
   %2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
@@ -856,7 +851,7 @@ llvm.func @multireturn_caller() {
   %7 = llvm.fadd %2, %6 : f32
   %8 = llvm.mlir.constant(0 : index) : i64
   %9 = llvm.mlir.constant(42 : index) : i64
-// CHECK:   extractvalue { float*, i64, i64 } [[ret2]], 0
+// CHECK:   extractvalue { ptr, i64, i64 } [[ret2]], 0
   %10 = llvm.extractvalue %3[1] : !llvm.struct<(ptr<f32>, i64, i64)>
   %11 = llvm.mlir.constant(10 : index) : i64
   %12 = llvm.extractvalue %3[2] : !llvm.struct<(ptr<f32>, i64, i64)>
@@ -999,9 +994,9 @@ llvm.func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32) -> !llvm.struct<(
 // CHECK-LABEL: @gep
 llvm.func @gep(%ptr: !llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, %idx: i64,
                %ptr2: !llvm.ptr<struct<(array<10xf32>)>>) {
-  // CHECK: = getelementptr { i32, { i32, float } }, { i32, { i32, float } }* %{{.*}}, i64 %{{.*}}, i32 1, i32 0
+  // CHECK: = getelementptr { i32, { i32, float } }, ptr %{{.*}}, i64 %{{.*}}, i32 1, i32 0
   llvm.getelementptr %ptr[%idx, 1, 0] : (!llvm.ptr<struct<(i32, struct<(i32, f32)>)>>, i64) -> !llvm.ptr<i32>
-  // CHECK: = getelementptr { [10 x float] }, { [10 x float] }* %{{.*}}, i64 %{{.*}}, i32 0, i64 %{{.*}}
+  // CHECK: = getelementptr { [10 x float] }, ptr %{{.*}}, i64 %{{.*}}, i32 0, i64 %{{.*}}
   llvm.getelementptr %ptr2[%idx, 0, %idx] : (!llvm.ptr<struct<(array<10xf32>)>>, i64, i64) -> !llvm.ptr<f32>
   llvm.return
 }
@@ -1019,7 +1014,7 @@ llvm.func @indirect_const_call(%arg0: i64) {
   llvm.return
 }
 
-// CHECK-LABEL: define i32 @indirect_call(i32 (float)* {{%.*}}, float {{%.*}})
+// CHECK-LABEL: define i32 @indirect_call(ptr {{%.*}}, float {{%.*}})
 llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (f32)>>, %arg1: f32) -> i32 {
 // CHECK-NEXT:  %3 = call i32 %0(float %1)
   %0 = llvm.call %arg0(%arg1) : (f32) -> i32
@@ -1049,27 +1044,27 @@ llvm.func @cond_br_arguments(%arg0: i1, %arg1: i1) {
   llvm.br ^bb1(%arg1 : i1)
 }
 
-// CHECK-LABEL: define void @llvm_noalias(float* noalias {{%*.}})
+// CHECK-LABEL: define void @llvm_noalias(ptr noalias {{%*.}})
 llvm.func @llvm_noalias(%arg0: !llvm.ptr<f32> {llvm.noalias}) {
   llvm.return
 }
 
-// CHECK-LABEL: define void @byvalattr(i32* byval(i32) %
+// CHECK-LABEL: define void @byvalattr(ptr byval(i32) %
 llvm.func @byvalattr(%arg0: !llvm.ptr<i32> {llvm.byval}) {
   llvm.return
 }
 
-// CHECK-LABEL: define void @sretattr(i32* sret(i32) %
+// CHECK-LABEL: define void @sretattr(ptr sret(i32) %
 llvm.func @sretattr(%arg0: !llvm.ptr<i32> {llvm.sret}) {
   llvm.return
 }
 
-// CHECK-LABEL: define void @nestattr(i32* nest %
+// CHECK-LABEL: define void @nestattr(ptr nest %
 llvm.func @nestattr(%arg0: !llvm.ptr<i32> {llvm.nest}) {
   llvm.return
 }
 
-// CHECK-LABEL: define void @llvm_align(float* align 4 {{%*.}})
+// CHECK-LABEL: define void @llvm_align(ptr align 4 {{%*.}})
 llvm.func @llvm_align(%arg0: !llvm.ptr<f32> {llvm.align = 4}) {
   llvm.return
 }
@@ -1078,8 +1073,8 @@ llvm.func @llvm_align(%arg0: !llvm.ptr<f32> {llvm.align = 4}) {
 llvm.func @llvm_varargs(...)
 
 llvm.func @intpointerconversion(%arg0 : i32) -> i32 {
-// CHECK:      %2 = inttoptr i32 %0 to i32*
-// CHECK-NEXT: %3 = ptrtoint i32* %2 to i32
+// CHECK:      %2 = inttoptr i32 %0 to ptr
+// CHECK-NEXT: %3 = ptrtoint ptr %2 to i32
   %1 = llvm.inttoptr %arg0 : i32 to !llvm.ptr<i32>
   %2 = llvm.ptrtoint %1 : !llvm.ptr<i32> to i32
   llvm.return %2 : i32
@@ -1099,7 +1094,7 @@ llvm.func @fpconversion(%arg0 : i32) -> i32 {
 
 // CHECK-LABEL: @addrspace
 llvm.func @addrspace(%arg0 : !llvm.ptr<i32>) -> !llvm.ptr<i32, 2> {
-// CHECK: %2 = addrspacecast i32* %0 to i32 addrspace(2)*
+// CHECK: %2 = addrspacecast ptr %0 to ptr addrspace(2)
   %1 = llvm.addrspacecast %arg0 : !llvm.ptr<i32> to !llvm.ptr<i32, 2>
   llvm.return %1 : !llvm.ptr<i32, 2>
 }
@@ -1258,7 +1253,7 @@ llvm.func @integer_extension_and_truncation(%a : i32) {
 // CHECK-LABEL: @null
 llvm.func @null() -> !llvm.ptr<i32> {
   %0 = llvm.mlir.null : !llvm.ptr<i32>
-  // CHECK: ret i32* null
+  // CHECK: ret ptr null
   llvm.return %0 : !llvm.ptr<i32>
 }
 
@@ -1285,38 +1280,38 @@ llvm.func @elements_constant_3d_array() -> !llvm.array<2 x array<2 x array<2 x i
 llvm.func @atomicrmw(
     %f32_ptr : !llvm.ptr<f32>, %f32 : f32,
     %i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
-  // CHECK: atomicrmw fadd float* %{{.*}}, float %{{.*}} monotonic
+  // CHECK: atomicrmw fadd ptr %{{.*}}, float %{{.*}} monotonic
   %0 = llvm.atomicrmw fadd %f32_ptr, %f32 monotonic : f32
-  // CHECK: atomicrmw fsub float* %{{.*}}, float %{{.*}} monotonic
+  // CHECK: atomicrmw fsub ptr %{{.*}}, float %{{.*}} monotonic
   %1 = llvm.atomicrmw fsub %f32_ptr, %f32 monotonic : f32
-  // CHECK: atomicrmw xchg float* %{{.*}}, float %{{.*}} monotonic
+  // CHECK: atomicrmw xchg ptr %{{.*}}, float %{{.*}} monotonic
   %2 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : f32
-  // CHECK: atomicrmw add i32* %{{.*}}, i32 %{{.*}} acquire
+  // CHECK: atomicrmw add ptr %{{.*}}, i32 %{{.*}} acquire
   %3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : i32
-  // CHECK: atomicrmw sub i32* %{{.*}}, i32 %{{.*}} release
+  // CHECK: atomicrmw sub ptr %{{.*}}, i32 %{{.*}} release
   %4 = llvm.atomicrmw sub %i32_ptr, %i32 release : i32
-  // CHECK: atomicrmw and i32* %{{.*}}, i32 %{{.*}} acq_rel
+  // CHECK: atomicrmw and ptr %{{.*}}, i32 %{{.*}} acq_rel
   %5 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : i32
-  // CHECK: atomicrmw nand i32* %{{.*}}, i32 %{{.*}} seq_cst
+  // CHECK: atomicrmw nand ptr %{{.*}}, i32 %{{.*}} seq_cst
   %6 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : i32
-  // CHECK: atomicrmw or i32* %{{.*}}, i32 %{{.*}} monotonic
+  // CHECK: atomicrmw or ptr %{{.*}}, i32 %{{.*}} monotonic
   %7 = llvm.atomicrmw _or %i32_ptr, %i32 monotonic : i32
-  // CHECK: atomicrmw xor i32* %{{.*}}, i32 %{{.*}} monotonic
+  // CHECK: atomicrmw xor ptr %{{.*}}, i32 %{{.*}} monotonic
   %8 = llvm.atomicrmw _xor %i32_ptr, %i32 monotonic : i32
-  // CHECK: atomicrmw max i32* %{{.*}}, i32 %{{.*}} monotonic
+  // CHECK: atomicrmw max ptr %{{.*}}, i32 %{{.*}} monotonic
   %9 = llvm.atomicrmw max %i32_ptr, %i32 monotonic : i32
-  // CHECK: atomicrmw min i32* %{{.*}}, i32 %{{.*}} monotonic
+  // CHECK: atomicrmw min ptr %{{.*}}, i32 %{{.*}} monotonic
   %10 = llvm.atomicrmw min %i32_ptr, %i32 monotonic : i32
-  // CHECK: atomicrmw umax i32* %{{.*}}, i32 %{{.*}} monotonic
+  // CHECK: atomicrmw umax ptr %{{.*}}, i32 %{{.*}} monotonic
   %11 = llvm.atomicrmw umax %i32_ptr, %i32 monotonic : i32
-  // CHECK: atomicrmw umin i32* %{{.*}}, i32 %{{.*}} monotonic
+  // CHECK: atomicrmw umin ptr %{{.*}}, i32 %{{.*}} monotonic
   %12 = llvm.atomicrmw umin %i32_ptr, %i32 monotonic : i32
   llvm.return
 }
 
 // CHECK-LABEL: @cmpxchg
 llvm.func @cmpxchg(%ptr : !llvm.ptr<i32>, %cmp : i32, %val: i32) {
-  // CHECK: cmpxchg i32* %{{.*}}, i32 %{{.*}}, i32 %{{.*}} acq_rel monotonic
+  // CHECK: cmpxchg ptr %{{.*}}, i32 %{{.*}}, i32 %{{.*}} acq_rel monotonic
   %0 = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : i32
   // CHECK: %{{[0-9]+}} = extractvalue { i32, i1 } %{{[0-9]+}}, 0
   %1 = llvm.extractvalue %0[0] : !llvm.struct<(i32, i1)>
@@ -1340,15 +1335,15 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
   %4 = llvm.mlir.null : !llvm.ptr<ptr<i8>>
   %5 = llvm.mlir.constant(1 : i32) : i32
   %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr<i8>
-// CHECK: invoke void @foo(i8* %[[a1]])
+// CHECK: invoke void @foo(ptr %[[a1]])
 // CHECK-NEXT: to label %[[normal:[0-9]+]] unwind label %[[unwind:[0-9]+]]
   llvm.invoke @foo(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> ()
 
 // CHECK: [[unwind]]:
 ^bb1:
-// CHECK: %{{[0-9]+}} = landingpad { i8*, i32 }
-// CHECK-NEXT:             catch i8** null
-// CHECK-NEXT:             catch i8* bitcast (i8** @_ZTIi to i8*)
+// CHECK: %{{[0-9]+}} = landingpad { ptr, i32 }
+// CHECK-NEXT:             catch ptr null
+// CHECK-NEXT:             catch ptr @_ZTIi
 // CHECK-NEXT:             filter [1 x i8] zeroinitializer
   %7 = llvm.landingpad (catch %4 : !llvm.ptr<ptr<i8>>) (catch %3 : !llvm.ptr<i8>) (filter %1 : !llvm.array<1 x i8>) : !llvm.struct<(ptr<i8>, i32)>
 // CHECK: br label %[[final:[0-9]+]]
@@ -1360,7 +1355,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
   llvm.return %5 : i32
 
 // CHECK: [[final]]:
-// CHECK-NEXT: %{{[0-9]+}} = invoke i8* @bar(i8* %[[a1]])
+// CHECK-NEXT: %{{[0-9]+}} = invoke ptr @bar(ptr %[[a1]])
 // CHECK-NEXT:          to label %[[normal]] unwind label %[[unwind]]
 ^bb3:	// pred: ^bb1
   %8 = llvm.invoke @bar(%6) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> !llvm.ptr<i8>
@@ -1379,14 +1374,14 @@ llvm.func @invoke_result(%arg0 : !llvm.ptr<i8>) attributes { personality = @__gx
     %0 = llvm.invoke @foo() to ^bb1 unwind ^bb2 : () -> i8
 
 // CHECK: [[normal]]:
-// CHECK-NEXT: store i8 %[[a1]], i8* %[[a0]]
+// CHECK-NEXT: store i8 %[[a1]], ptr %[[a0]]
 // CHECK-NEXT: ret void
 ^bb1:
     llvm.store %0, %arg0 : !llvm.ptr<i8>
     llvm.return
 
 // CHECK: [[unwind]]:
-// CHECK-NEXT: landingpad { i8*, i32 }
+// CHECK-NEXT: landingpad { ptr, i32 }
 // CHECK-NEXT: cleanup
 // CHECK-NEXT: ret void
 ^bb2:
@@ -1413,7 +1408,7 @@ llvm.func @invoke_phis() -> i32 attributes { personality = @__gxx_personality_v0
     llvm.return %1 : i32
 
 // CHECK: [[unwind]]:
-// CHECK-NEXT: landingpad { i8*, i32 }
+// CHECK-NEXT: landingpad { ptr, i32 }
 // CHECK-NEXT: cleanup
 // CHECK-NEXT: br label %[[normal]]
 ^bb2:
@@ -1495,7 +1490,7 @@ llvm.mlir.global internal constant @taker_of_address() : !llvm.ptr<func<void ()>
 
 // -----
 
-// CHECK: @forward_use_of_address = linkonce global float* @address_declared_after_use
+// CHECK: @forward_use_of_address = linkonce global ptr @address_declared_after_use
 llvm.mlir.global linkonce @forward_use_of_address() : !llvm.ptr<f32> {
   %0 = llvm.mlir.addressof @address_declared_after_use : !llvm.ptr<f32>
   llvm.return %0 : !llvm.ptr<f32>
@@ -1505,7 +1500,7 @@ llvm.mlir.global linkonce @address_declared_after_use() : f32
 
 // -----
 
-// CHECK: @take_self_address = linkonce global { i32, i32* } {{.*}} { i32, i32* }* @take_self_address
+// CHECK: @take_self_address = linkonce global { i32, ptr } {{.*}} ptr @take_self_address
 llvm.mlir.global linkonce @take_self_address() : !llvm.struct<(i32, !llvm.ptr<i32>)> {
   %z32 = llvm.mlir.constant(0 : i32) : i32
   %0 = llvm.mlir.undef : !llvm.struct<(i32, !llvm.ptr<i32>)>
@@ -1518,7 +1513,7 @@ llvm.mlir.global linkonce @take_self_address() : !llvm.struct<(i32, !llvm.ptr<i3
 
 // -----
 
-// CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* @foo, i8* null }]
+// CHECK: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @foo, ptr null }]
 llvm.mlir.global_ctors { ctors = [@foo], priorities = [0 : i32]}
 
 llvm.func @foo() {
@@ -1527,7 +1522,7 @@ llvm.func @foo() {
 
 // -----
 
-// CHECK: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* @foo, i8* null }]
+// CHECK: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @foo, ptr null }]
 llvm.mlir.global_dtors { dtors = [@foo], priorities = [0 : i32]}
 
 llvm.func @foo() {
@@ -1554,9 +1549,9 @@ llvm.func @volatile_store_and_load() {
   %val = llvm.mlir.constant(5 : i32) : i32
   %size = llvm.mlir.constant(1 : i64) : i64
   %0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr<i32>)
-  // CHECK: store volatile i32 5, i32* %{{.*}}
+  // CHECK: store volatile i32 5, ptr %{{.*}}
   llvm.store volatile %val, %0 : !llvm.ptr<i32>
-  // CHECK: %{{.*}} = load volatile i32, i32* %{{.*}}
+  // CHECK: %{{.*}} = load volatile i32, ptr %{{.*}}
   %1 = llvm.load volatile %0: !llvm.ptr<i32>
   llvm.return
 }
@@ -1754,7 +1749,7 @@ module {
       llvm.cond_br %2, ^bb4, ^bb5 {llvm.loop = {parallel_access = [@metadata::@group1, @metadata::@group2], options = #llvm.loopopts<disable_licm = true, disable_unroll = true, interleave_count = 1, disable_pipeline = true, pipeline_initiation_interval = 2>}}
     ^bb4:
       %3 = llvm.add %1, %arg2  : i32
-      // CHECK: = load i32, i32* %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE:[0-9]+]]
+      // CHECK: = load i32, ptr %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE:[0-9]+]]
       %5 = llvm.load %4 { access_groups = [@metadata::@group1, @metadata::@group2] } : !llvm.ptr<i32>
       // CHECK: br label {{.*}} !llvm.loop ![[LOOP_NODE]]
       llvm.br ^bb3(%3 : i32) {llvm.loop = {parallel_access = [@metadata::@group1, @metadata::@group2], options = #llvm.loopopts<disable_unroll = true, disable_licm = true, interleave_count = 1, disable_pipeline = true, pipeline_initiation_interval = 2>}}

diff  --git a/mlir/test/Target/LLVMIR/nvvmir.mlir b/mlir/test/Target/LLVMIR/nvvmir.mlir
index 36239c3e19d4f..53af04140c38d 100644
--- a/mlir/test/Target/LLVMIR/nvvmir.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir.mlir
@@ -233,7 +233,7 @@ llvm.func @nvvm_mma_m16n8k4_tf32_f32(%a0 : i32, %a1 : i32,
 // in the LLVM NVPTX backend.
 // CHECK-LABEL: @gpu_wmma_load_op
 llvm.func @gpu_wmma_load_op(%arg0: !llvm.ptr<i32, 3>, %arg1: i32) {
-  // CHECK: call { <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half> } @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16.p3i32(i32 addrspace(3)* %{{.*}}, i32 %{{.*}})
+  // CHECK: call { <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half>, <2 x half> } @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
   %0 = nvvm.wmma.load %arg0, %arg1
     {eltype = #nvvm.mma_type<f16>, frag = #nvvm.mma_frag<a>, k = 16 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
     : (!llvm.ptr<i32, 3>) -> !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
@@ -247,7 +247,7 @@ llvm.func @gpu_wmma_load_op(%arg0: !llvm.ptr<i32, 3>, %arg1: i32) {
 llvm.func @gpu_wmma_store_op(%arg0: !llvm.ptr<i32, 3>, %arg1: i32,
                             %arg2: vector<2 x f16>, %arg3: vector<2 x f16>,
                             %arg4: vector<2 xf16>, %arg5: vector<2 x f16>) {
-  // CHECK: call void @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16.p3i32(i32 addrspace(3)* %{{.*}}, <2 x half> {{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, i32 %{{.*}})
+  // CHECK: call void @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16.p3(ptr addrspace(3) %{{.*}}, <2 x half> {{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, <2 x half> %{{.*}}, i32 %{{.*}})
   nvvm.wmma.store %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
     {eltype = #nvvm.mma_type<f16>, k = 16 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
     : !llvm.ptr<i32, 3>, vector<2 x f16>, vector<2 x f16>, vector<2 x f16>, vector<2 x f16>
@@ -280,7 +280,7 @@ llvm.func @gpu_wmma_mma_op(%arg0: vector<2 x f16>, %arg1: vector<2 x f16>,
 
 // CHECK-LABEL: @nvvm_wmma_load_tf32
 llvm.func @nvvm_wmma_load_tf32(%arg0: !llvm.ptr<i32>, %arg1 : i32) {
-  // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32.p0i32(i32* %{{.*}}, i32 %{{.*}})
+  // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32.p0(ptr %{{.*}}, i32 %{{.*}})
   %0 = nvvm.wmma.load %arg0, %arg1
     {eltype = #nvvm.mma_type<tf32>, frag = #nvvm.mma_frag<a>, k = 8 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32}
     : (!llvm.ptr<i32>) -> !llvm.struct<(i32, i32, i32, i32)>
@@ -301,13 +301,13 @@ llvm.func @nvvm_wmma_mma(%0 : i32, %1 : i32, %2 : i32, %3 : i32, %4 : i32, %5 :
 
 // CHECK-LABEL: @cp_async
 llvm.func @cp_async(%arg0: !llvm.ptr<i8, 3>, %arg1: !llvm.ptr<i8, 1>) {
-// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.4(i8 addrspace(3)* %{{.*}}, i8 addrspace(1)* %{{.*}})
+// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.4(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
   nvvm.cp.async.shared.global %arg0, %arg1, 4
-// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.8(i8 addrspace(3)* %{{.*}}, i8 addrspace(1)* %{{.*}})
+// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.8(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
   nvvm.cp.async.shared.global %arg0, %arg1, 8
-// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.16(i8 addrspace(3)* %{{.*}}, i8 addrspace(1)* %{{.*}})
+// CHECK: call void @llvm.nvvm.cp.async.ca.shared.global.16(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
   nvvm.cp.async.shared.global %arg0, %arg1, 16
-// CHECK: call void @llvm.nvvm.cp.async.cg.shared.global.16(i8 addrspace(3)* %{{.*}}, i8 addrspace(1)* %{{.*}})
+// CHECK: call void @llvm.nvvm.cp.async.cg.shared.global.16(ptr addrspace(3) %{{.*}}, ptr addrspace(1) %{{.*}})
   nvvm.cp.async.shared.global %arg0, %arg1, 16 {bypass_l1}
 // CHECK: call void @llvm.nvvm.cp.async.commit.group()
   nvvm.cp.async.commit.group
@@ -318,17 +318,17 @@ llvm.func @cp_async(%arg0: !llvm.ptr<i8, 3>, %arg1: !llvm.ptr<i8, 1>) {
 
 // CHECK-LABEL: @ld_matrix
 llvm.func @ld_matrix(%arg0: !llvm.ptr<i32, 3>) {
-  // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16.p3i32(i32 addrspace(3)* %{{.*}})
+  // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16.p3(ptr addrspace(3) %{{.*}})
   %l1 = nvvm.ldmatrix %arg0 {num = 1 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> i32
-  // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16.p3i32(i32 addrspace(3)* %{{.*}})
+  // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16.p3(ptr addrspace(3) %{{.*}})
   %l2 = nvvm.ldmatrix %arg0 {num = 2 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32)>
-  // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16.p3i32(i32 addrspace(3)* %{{.*}})
+  // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16.p3(ptr addrspace(3) %{{.*}})
   %l4 = nvvm.ldmatrix %arg0 {num = 4 : i32, layout = #nvvm.mma_layout<row>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32, i32, i32)>
-   // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16.p3i32(i32 addrspace(3)* %{{.*}})
+   // CHECK: call i32 @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16.p3(ptr addrspace(3) %{{.*}})
   %l1t = nvvm.ldmatrix %arg0 {num = 1 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> i32
-  // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16.p3i32(i32 addrspace(3)* %{{.*}})
+  // CHECK: call { i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16.p3(ptr addrspace(3) %{{.*}})
   %l2t = nvvm.ldmatrix %arg0 {num = 2 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32)>
-  // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16.p3i32(i32 addrspace(3)* %{{.*}})
+  // CHECK: call { i32, i32, i32, i32 } @llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16.p3(ptr addrspace(3) %{{.*}})
   %l4t = nvvm.ldmatrix %arg0 {num = 4 : i32, layout = #nvvm.mma_layout<col>} : (!llvm.ptr<i32, 3>) -> !llvm.struct<(i32, i32, i32, i32)>
   llvm.return
 }
@@ -340,5 +340,5 @@ llvm.func @kernel_func() attributes {nvvm.kernel} {
 }
 
 // CHECK:     !nvvm.annotations =
-// CHECK-NOT: {i32 ()* @nvvm_special_regs, !"kernel", i32 1}
-// CHECK:     {void ()* @kernel_func, !"kernel", i32 1}
+// CHECK-NOT: {ptr @nvvm_special_regs, !"kernel", i32 1}
+// CHECK:     {ptr @kernel_func, !"kernel", i32 1}

diff  --git a/mlir/test/Target/LLVMIR/openacc-llvm.mlir b/mlir/test/Target/LLVMIR/openacc-llvm.mlir
index d0ebfe0e2c9c7..df3ce7a1d96d0 100644
--- a/mlir/test/Target/LLVMIR/openacc-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openacc-llvm.mlir
@@ -21,48 +21,44 @@ llvm.func @testenterdataop(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>, %arg2:
   llvm.return
 }
 
-// CHECK: %struct.ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
 
 // CHECK: [[LOCSTR:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};testenterdataop;{{[0-9]*}};{{[0-9]*}};;\00", align 1
-// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[LOCSTR]], i32 0, i32 0) }, align 8
+// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, ptr [[LOCSTR]] }, align 8
 // CHECK: [[MAPNAME1:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
 // CHECK: [[MAPNAME2:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
 // CHECK: [[MAPTYPES:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i64] [i64 0, i64 1]
-// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x i8*] [i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME1]], i32 0, i32 0), i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME2]], i32 0, i32 0)]
+// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x ptr] [ptr [[MAPNAME1]], ptr [[MAPNAME2]]]
 
-// CHECK: define void @testenterdataop(float* %{{.*}}, float* %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, float* [[SIMPLEPTR:%.*]])
-// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
-// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
+// CHECK: define void @testenterdataop(ptr %{{.*}}, ptr %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, ptr [[SIMPLEPTR:%.*]])
+// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
+// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
 // CHECK: [[SIZE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i64], align 8
 
 // CHECK: [[ARGBASE:%.*]] = extractvalue %openacc_data %{{.*}}, 0
 // CHECK: [[ARG:%.*]] = extractvalue %openacc_data %{{.*}}, 1
 // CHECK: [[ARGSIZE:%.*]] = extractvalue %openacc_data %{{.*}}, 2
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to { float*, float*, i64, [1 x i64], [1 x i64] }*
-// CHECK: store { float*, float*, i64, [1 x i64], [1 x i64] } [[ARGBASE]], { float*, float*, i64, [1 x i64], [1 x i64] }* [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[ARG]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: store i64 [[ARGSIZE]], i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 1
-// CHECK: store i64 ptrtoint (float** getelementptr (float*, float** null, i32 1) to i64), i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-
-// CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
-
-// CHECK: declare void @__tgt_target_data_begin_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**) #0
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: store { ptr, ptr, i64, [1 x i64], [1 x i64] } [[ARGBASE]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: store ptr [[ARG]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: store i64 [[ARGSIZE]], ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 1
+// CHECK: store i64 ptrtoint (ptr getelementptr (ptr, ptr null, i32 1) to i64), ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+
+// CHECK: call void @__tgt_target_data_begin_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
+
+// CHECK: declare void @__tgt_target_data_begin_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr) #0
 
 // -----
 
@@ -81,48 +77,44 @@ llvm.func @testexitdataop(%arg0: !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1
   llvm.return
 }
 
-// CHECK: %struct.ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
 
 // CHECK: [[LOCSTR:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};testexitdataop;{{[0-9]*}};{{[0-9]*}};;\00", align 1
-// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[LOCSTR]], i32 0, i32 0) }, align 8
+// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, ptr [[LOCSTR]] }, align 8
 // CHECK: [[MAPNAME1:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
 // CHECK: [[MAPNAME2:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
 // CHECK: [[MAPTYPES:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i64] [i64 8, i64 2]
-// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x i8*] [i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME1]], i32 0, i32 0), i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME2]], i32 0, i32 0)]
+// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x ptr] [ptr [[MAPNAME1]], ptr [[MAPNAME2]]]
 
-// CHECK: define void @testexitdataop({ float*, float*, i64, [1 x i64], [1 x i64] } %{{.*}}, float* [[SIMPLEPTR:%.*]])
-// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
-// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
+// CHECK: define void @testexitdataop({ ptr, ptr, i64, [1 x i64], [1 x i64] } %{{.*}}, ptr [[SIMPLEPTR:%.*]])
+// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
+// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
 // CHECK: [[SIZE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i64], align 8
 
 // CHECK: [[ARGBASE:%.*]] = extractvalue %openacc_data %{{.*}}, 0
 // CHECK: [[ARG:%.*]] = extractvalue %openacc_data %{{.*}}, 1
 // CHECK: [[ARGSIZE:%.*]] = extractvalue %openacc_data %{{.*}}, 2
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to { float*, float*, i64, [1 x i64], [1 x i64] }*
-// CHECK: store { float*, float*, i64, [1 x i64], [1 x i64] } [[ARGBASE]], { float*, float*, i64, [1 x i64], [1 x i64] }* [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[ARG]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: store i64 [[ARGSIZE]], i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 1
-// CHECK: store i64 ptrtoint (float** getelementptr (float*, float** null, i32 1) to i64), i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-
-// CHECK: call void @__tgt_target_data_end_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
-
-// CHECK: declare void @__tgt_target_data_end_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**) #0
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: store { ptr, ptr, i64, [1 x i64], [1 x i64] } [[ARGBASE]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: store ptr [[ARG]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: store i64 [[ARGSIZE]], ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 1
+// CHECK: store i64 ptrtoint (ptr getelementptr (ptr, ptr null, i32 1) to i64), ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+
+// CHECK: call void @__tgt_target_data_end_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
+
+// CHECK: declare void @__tgt_target_data_end_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr) #0
 
 // -----
 
@@ -140,48 +132,44 @@ llvm.func @testupdateop(%arg0: !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x
   llvm.return
 }
 
-// CHECK: %struct.ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
 
 // CHECK: [[LOCSTR:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};testupdateop;{{[0-9]*}};{{[0-9]*}};;\00", align 1
-// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[LOCSTR]], i32 0, i32 0) }, align 8
+// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, ptr [[LOCSTR]] }, align 8
 // CHECK: [[MAPNAME1:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
 // CHECK: [[MAPNAME2:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
 // CHECK: [[MAPTYPES:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i64] [i64 2, i64 1]
-// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x i8*] [i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME1]], i32 0, i32 0), i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME2]], i32 0, i32 0)]
+// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x ptr] [ptr [[MAPNAME1]], ptr [[MAPNAME2]]]
 
-// CHECK: define void @testupdateop({ float*, float*, i64, [1 x i64], [1 x i64] } %{{.*}}, float* [[SIMPLEPTR:%.*]])
-// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
-// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
+// CHECK: define void @testupdateop({ ptr, ptr, i64, [1 x i64], [1 x i64] } %{{.*}}, ptr [[SIMPLEPTR:%.*]])
+// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
+// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
 // CHECK: [[SIZE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i64], align 8
 
 // CHECK: [[ARGBASE:%.*]] = extractvalue %openacc_data %{{.*}}, 0
 // CHECK: [[ARG:%.*]] = extractvalue %openacc_data %{{.*}}, 1
 // CHECK: [[ARGSIZE:%.*]] = extractvalue %openacc_data %{{.*}}, 2
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to { float*, float*, i64, [1 x i64], [1 x i64] }*
-// CHECK: store { float*, float*, i64, [1 x i64], [1 x i64] } [[ARGBASE]], { float*, float*, i64, [1 x i64], [1 x i64] }* [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[ARG]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: store i64 [[ARGSIZE]], i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 1
-// CHECK: store i64 ptrtoint (float** getelementptr (float*, float** null, i32 1) to i64), i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-
-// CHECK: call void @__tgt_target_data_update_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
-
-// CHECK: declare void @__tgt_target_data_update_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**) #0
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: store { ptr, ptr, i64, [1 x i64], [1 x i64] } [[ARGBASE]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: store ptr [[ARG]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: store i64 [[ARGSIZE]], ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 1
+// CHECK: store i64 ptrtoint (ptr getelementptr (ptr, ptr null, i32 1) to i64), ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+
+// CHECK: call void @__tgt_target_data_update_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
+
+// CHECK: declare void @__tgt_target_data_update_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr) #0
 
 // -----
 
@@ -203,55 +191,51 @@ llvm.func @testdataop(%arg0: !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i6
   llvm.return
 }
 
-// CHECK: %struct.ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK: %struct.ident_t = type { i32, i32, i32, i32, ptr }
 // CHECK: [[LOCSTR:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};testdataop;{{[0-9]*}};{{[0-9]*}};;\00", align 1
-// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[LOCSTR]], i32 0, i32 0) }, align 8
+// CHECK: [[LOCGLOBAL:@.*]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 {{[0-9]*}}, ptr [[LOCSTR]] }, align 8
 // CHECK: [[MAPNAME1:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
 // CHECK: [[MAPNAME2:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i8] c";{{.*}};unknown;{{[0-9]*}};{{[0-9]*}};;\00", align 1
 // CHECK: [[MAPTYPES:@.*]] = private unnamed_addr constant [{{[0-9]*}} x i64] [i64 8195, i64 8194]
-// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x i8*] [i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME1]], i32 0, i32 0), i8* getelementptr inbounds ([{{[0-9]*}} x i8], [{{[0-9]*}} x i8]* [[MAPNAME2]], i32 0, i32 0)]
+// CHECK: [[MAPNAMES:@.*]] = private constant [{{[0-9]*}} x ptr] [ptr [[MAPNAME1]], ptr [[MAPNAME2]]]
 
-// CHECK: define void @testdataop({ float*, float*, i64, [1 x i64], [1 x i64] } %{{.*}}, float* [[SIMPLEPTR:%.*]], i32* %{{.*}})
-// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
-// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i8*], align 8
+// CHECK: define void @testdataop({ ptr, ptr, i64, [1 x i64], [1 x i64] } %{{.*}}, ptr [[SIMPLEPTR:%.*]], ptr %{{.*}})
+// CHECK: [[ARGBASE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
+// CHECK: [[ARG_ALLOCA:%.*]] = alloca [{{[0-9]*}} x ptr], align 8
 // CHECK: [[SIZE_ALLOCA:%.*]] = alloca [{{[0-9]*}} x i64], align 8
 
 // CHECK: [[ARGBASE:%.*]] = extractvalue %openacc_data %{{.*}}, 0
 // CHECK: [[ARG:%.*]] = extractvalue %openacc_data %{{.*}}, 1
 // CHECK: [[ARGSIZE:%.*]] = extractvalue %openacc_data %{{.*}}, 2
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to { float*, float*, i64, [1 x i64], [1 x i64] }*
-// CHECK: store { float*, float*, i64, [1 x i64], [1 x i64] } [[ARGBASE]], { float*, float*, i64, [1 x i64], [1 x i64] }* [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[ARG]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: store i64 [[ARGSIZE]], i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGBASEGEPCAST:%.*]] = bitcast i8** [[ARGBASEGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGBASEGEPCAST]], align 8
-// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 1
-// CHECK: [[ARGGEPCAST:%.*]] = bitcast i8** [[ARGGEP]] to float**
-// CHECK: store float* [[SIMPLEPTR]], float** [[ARGGEPCAST]], align 8
-// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 1
-// CHECK: store i64 ptrtoint (float** getelementptr (float*, float** null, i32 1) to i64), i64* [[SIZEGEP]], align 4
-
-// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: store { ptr, ptr, i64, [1 x i64], [1 x i64] } [[ARGBASE]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: store ptr [[ARG]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: store i64 [[ARGSIZE]], ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASEGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGBASEGEP]], align 8
+// CHECK: [[ARGGEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 1
+// CHECK: store ptr [[SIMPLEPTR]], ptr [[ARGGEP]], align 8
+// CHECK: [[SIZEGEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 1
+// CHECK: store i64 ptrtoint (ptr getelementptr (ptr, ptr null, i32 1) to i64), ptr [[SIZEGEP]], align 4
+
+// CHECK: [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK: [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK: [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK: call void @__tgt_target_data_begin_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
 // CHECK: br label %acc.data
 
 // CHECK:      acc.data:
-// CHECK-NEXT:   store i32 2, i32* %{{.*}}
+// CHECK-NEXT:   store i32 2, ptr %{{.*}}
 // CHECK-NEXT:   br label %acc.end_data
 
 // CHECK: acc.end_data:
-// CHECK:   [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARGBASE_ALLOCA]], i32 0, i32 0
-// CHECK:   [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[ARG_ALLOCA]], i32 0, i32 0
-// CHECK:   [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZE_ALLOCA]], i32 0, i32 0
-// CHECK:   call void @__tgt_target_data_end_mapper(%struct.ident_t* [[LOCGLOBAL]], i64 -1, i32 2, i8** [[ARGBASE_ALLOCA_GEP]], i8** [[ARG_ALLOCA_GEP]], i64* [[SIZE_ALLOCA_GEP]], i64* getelementptr inbounds ([{{[0-9]*}} x i64], [{{[0-9]*}} x i64]* [[MAPTYPES]], i32 0, i32 0), i8** getelementptr inbounds ([{{[0-9]*}} x i8*], [{{[0-9]*}} x i8*]* [[MAPNAMES]], i32 0, i32 0), i8** null)
+// CHECK:   [[ARGBASE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARGBASE_ALLOCA]], i32 0, i32 0
+// CHECK:   [[ARG_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr [[ARG_ALLOCA]], i32 0, i32 0
+// CHECK:   [[SIZE_ALLOCA_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[SIZE_ALLOCA]], i32 0, i32 0
+// CHECK:   call void @__tgt_target_data_end_mapper(ptr [[LOCGLOBAL]], i64 -1, i32 2, ptr [[ARGBASE_ALLOCA_GEP]], ptr [[ARG_ALLOCA_GEP]], ptr [[SIZE_ALLOCA_GEP]], ptr [[MAPTYPES]], ptr [[MAPNAMES]], ptr null)
 
-// CHECK: declare void @__tgt_target_data_begin_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**)
-// CHECK: declare void @__tgt_target_data_end_mapper(%struct.ident_t*, i64, i32, i8**, i8**, i64*, i64*, i8**, i8**)
+// CHECK: declare void @__tgt_target_data_begin_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr)
+// CHECK: declare void @__tgt_target_data_end_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr)

diff  --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index 71a8da2ae0d92..7b484812d22e3 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -2,16 +2,16 @@
 
 // CHECK-LABEL: define void @test_stand_alone_directives()
 llvm.func @test_stand_alone_directives() {
-  // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
-  // CHECK-NEXT:  call void @__kmpc_barrier(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD]])
+  // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+  // CHECK-NEXT:  call void @__kmpc_barrier(ptr @{{[0-9]+}}, i32 [[OMP_THREAD]])
   omp.barrier
 
-  // CHECK: [[OMP_THREAD1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
-  // CHECK-NEXT:  [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskwait(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD1]])
+  // CHECK: [[OMP_THREAD1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+  // CHECK-NEXT:  [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @{{[0-9]+}}, i32 [[OMP_THREAD1]])
   omp.taskwait
 
-  // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
-  // CHECK-NEXT:  [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskyield(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD2]], i32 0)
+  // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+  // CHECK-NEXT:  [[RET_VAL:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @{{[0-9]+}}, i32 [[OMP_THREAD2]], i32 0)
   omp.taskyield
 
   // CHECK-NEXT:    ret void
@@ -20,21 +20,21 @@ llvm.func @test_stand_alone_directives() {
 
 // CHECK-LABEL: define void @test_flush_construct(i32 %0)
 llvm.func @test_flush_construct(%arg0: i32) {
-  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
+  // CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
   omp.flush
 
-  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
+  // CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
   omp.flush (%arg0 : i32)
 
-  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
+  // CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
   omp.flush (%arg0, %arg0 : i32, i32)
 
   %0 = llvm.mlir.constant(1 : i64) : i64
   //  CHECK: alloca {{.*}} align 4
   %1 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr<i32>
-  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{[0-9]+}}
+  // CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
   omp.flush
-  //  CHECK: load i32, i32*
+  //  CHECK: load i32, ptr
   %2 = llvm.load %1 : !llvm.ptr<i32>
 
   // CHECK-NEXT:    ret void
@@ -43,7 +43,7 @@ llvm.func @test_flush_construct(%arg0: i32) {
 
 // CHECK-LABEL: define void @test_omp_parallel_1()
 llvm.func @test_omp_parallel_1() -> () {
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_1:.*]] to {{.*}}
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_1:.*]])
   omp.parallel {
     omp.barrier
     omp.terminator
@@ -59,7 +59,7 @@ llvm.func @body(i64)
 
 // CHECK-LABEL: define void @test_omp_parallel_2()
 llvm.func @test_omp_parallel_2() -> () {
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_2:.*]] to {{.*}}
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_2:.*]])
   omp.parallel {
     ^bb0:
       %0 = llvm.mlir.constant(1 : index) : i64
@@ -89,9 +89,9 @@ llvm.func @test_omp_parallel_2() -> () {
 
 // CHECK: define void @test_omp_parallel_num_threads_1(i32 %[[NUM_THREADS_VAR_1:.*]])
 llvm.func @test_omp_parallel_num_threads_1(%arg0: i32) -> () {
-  // CHECK: %[[GTN_NUM_THREADS_VAR_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_1:.*]])
-  // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_1]], i32 %[[GTN_NUM_THREADS_VAR_1]], i32 %[[NUM_THREADS_VAR_1]])
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_1:.*]] to {{.*}}
+  // CHECK: %[[GTN_NUM_THREADS_VAR_1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GTN_SI_VAR_1:.*]])
+  // CHECK: call void @__kmpc_push_num_threads(ptr @[[GTN_SI_VAR_1]], i32 %[[GTN_NUM_THREADS_VAR_1]], i32 %[[NUM_THREADS_VAR_1]])
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_1:.*]])
   omp.parallel num_threads(%arg0: i32) {
     omp.barrier
     omp.terminator
@@ -106,9 +106,9 @@ llvm.func @test_omp_parallel_num_threads_1(%arg0: i32) -> () {
 // CHECK: define void @test_omp_parallel_num_threads_2()
 llvm.func @test_omp_parallel_num_threads_2() -> () {
   %0 = llvm.mlir.constant(4 : index) : i32
-  // CHECK: %[[GTN_NUM_THREADS_VAR_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_2:.*]])
-  // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_2]], i32 %[[GTN_NUM_THREADS_VAR_2]], i32 4)
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_2:.*]] to {{.*}}
+  // CHECK: %[[GTN_NUM_THREADS_VAR_2:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GTN_SI_VAR_2:.*]])
+  // CHECK: call void @__kmpc_push_num_threads(ptr @[[GTN_SI_VAR_2]], i32 %[[GTN_NUM_THREADS_VAR_2]], i32 4)
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_2:.*]])
   omp.parallel num_threads(%0: i32) {
     omp.barrier
     omp.terminator
@@ -123,17 +123,17 @@ llvm.func @test_omp_parallel_num_threads_2() -> () {
 // CHECK: define void @test_omp_parallel_num_threads_3()
 llvm.func @test_omp_parallel_num_threads_3() -> () {
   %0 = llvm.mlir.constant(4 : index) : i32
-  // CHECK: %[[GTN_NUM_THREADS_VAR_3_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_1:.*]])
-  // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_1]], i32 %[[GTN_NUM_THREADS_VAR_3_1]], i32 4)
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_1:.*]] to {{.*}}
+  // CHECK: %[[GTN_NUM_THREADS_VAR_3_1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GTN_SI_VAR_3_1:.*]])
+  // CHECK: call void @__kmpc_push_num_threads(ptr @[[GTN_SI_VAR_3_1]], i32 %[[GTN_NUM_THREADS_VAR_3_1]], i32 4)
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_1:.*]])
   omp.parallel num_threads(%0: i32) {
     omp.barrier
     omp.terminator
   }
   %1 = llvm.mlir.constant(8 : index) : i32
-  // CHECK: %[[GTN_NUM_THREADS_VAR_3_2:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GTN_SI_VAR_3_2:.*]])
-  // CHECK: call void @__kmpc_push_num_threads(%struct.ident_t* @[[GTN_SI_VAR_3_2]], i32 %[[GTN_NUM_THREADS_VAR_3_2]], i32 8)
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_2:.*]] to {{.*}}
+  // CHECK: %[[GTN_NUM_THREADS_VAR_3_2:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GTN_SI_VAR_3_2:.*]])
+  // CHECK: call void @__kmpc_push_num_threads(ptr @[[GTN_SI_VAR_3_2]], i32 %[[GTN_NUM_THREADS_VAR_3_2]], i32 8)
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_NUM_THREADS_3_2:.*]])
   omp.parallel num_threads(%1: i32) {
     omp.barrier
     omp.terminator
@@ -162,21 +162,21 @@ llvm.func @test_omp_parallel_if_1(%arg0: i32) -> () {
   %0 = llvm.mlir.constant(0 : index) : i32
   %1 = llvm.icmp "slt" %arg0, %0 : i32
 
-// CHECK: %[[GTN_IF_1:.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[SI_VAR_IF_1:.*]])
+// CHECK: %[[GTN_IF_1:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[SI_VAR_IF_1:.*]])
 // CHECK: br i1 %[[IF_COND_VAR_1]], label %[[IF_COND_TRUE_BLOCK_1:.*]], label %[[IF_COND_FALSE_BLOCK_1:.*]]
 // CHECK: [[IF_COND_TRUE_BLOCK_1]]:
 // CHECK: br label %[[OUTLINED_CALL_IF_BLOCK_1:.*]]
 // CHECK: [[OUTLINED_CALL_IF_BLOCK_1]]:
-// CHECK: call void {{.*}} @__kmpc_fork_call(%struct.ident_t* @[[SI_VAR_IF_1]], {{.*}} @[[OMP_OUTLINED_FN_IF_1:.*]] to void
+// CHECK: call void {{.*}} @__kmpc_fork_call(ptr @[[SI_VAR_IF_1]], {{.*}} @[[OMP_OUTLINED_FN_IF_1:.*]])
 // CHECK: br label %[[OUTLINED_EXIT_IF_1:.*]]
 // CHECK: [[OUTLINED_EXIT_IF_1]]:
 // CHECK: br label %[[OUTLINED_EXIT_IF_2:.*]]
 // CHECK: [[OUTLINED_EXIT_IF_2]]:
 // CHECK: br label %[[RETURN_BLOCK_IF_1:.*]]
 // CHECK: [[IF_COND_FALSE_BLOCK_1]]:
-// CHECK: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
+// CHECK: call void @__kmpc_serialized_parallel(ptr @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
 // CHECK: call void @[[OMP_OUTLINED_FN_IF_1]]
-// CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
+// CHECK: call void @__kmpc_end_serialized_parallel(ptr @[[SI_VAR_IF_1]], i32 %[[GTN_IF_1]])
 // CHECK: br label %[[RETURN_BLOCK_IF_1]]
   omp.parallel if(%1 : i1) {
     omp.barrier
@@ -247,23 +247,23 @@ llvm.func @test_nested_alloca_ip(%arg0: i32) -> () {
 
 // CHECK-LABEL: define void @test_omp_parallel_3()
 llvm.func @test_omp_parallel_3() -> () {
-  // CHECK: [[OMP_THREAD_3_1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
-  // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_1]], i32 2)
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_1:.*]] to {{.*}}
+  // CHECK: [[OMP_THREAD_3_1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+  // CHECK: call void @__kmpc_push_proc_bind(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_1]], i32 2)
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_1:.*]])
   omp.parallel proc_bind(master) {
     omp.barrier
     omp.terminator
   }
-  // CHECK: [[OMP_THREAD_3_2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
-  // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_2]], i32 3)
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_2:.*]] to {{.*}}
+  // CHECK: [[OMP_THREAD_3_2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+  // CHECK: call void @__kmpc_push_proc_bind(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_2]], i32 3)
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_2:.*]])
   omp.parallel proc_bind(close) {
     omp.barrier
     omp.terminator
   }
-  // CHECK: [[OMP_THREAD_3_3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
-  // CHECK: call void @__kmpc_push_proc_bind(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_3]], i32 4)
-  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_3:.*]] to {{.*}}
+  // CHECK: [[OMP_THREAD_3_3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+  // CHECK: call void @__kmpc_push_proc_bind(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_3]], i32 4)
+  // CHECK: call void{{.*}}@__kmpc_fork_call{{.*}}@[[OMP_OUTLINED_FN_3_3:.*]])
   omp.parallel proc_bind(spread) {
     omp.barrier
     omp.terminator
@@ -278,10 +278,10 @@ llvm.func @test_omp_parallel_3() -> () {
 
 // CHECK-LABEL: define void @test_omp_parallel_4()
 llvm.func @test_omp_parallel_4() -> () {
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1:.*]])
 // CHECK: define internal void @[[OMP_OUTLINED_FN_4_1]]
 // CHECK: call void @__kmpc_barrier
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_4_1_1:.*]])
 // CHECK: call void @__kmpc_barrier
   omp.parallel {
     omp.barrier
@@ -300,17 +300,17 @@ llvm.func @test_omp_parallel_4() -> () {
 }
 
 llvm.func @test_omp_parallel_5() -> () {
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1:.*]])
 // CHECK: define internal void @[[OMP_OUTLINED_FN_5_1]]
 // CHECK: call void @__kmpc_barrier
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1:.*]])
 // CHECK: call void @__kmpc_barrier
   omp.parallel {
     omp.barrier
 
 // CHECK: define internal void @[[OMP_OUTLINED_FN_5_1_1]]
     omp.parallel {
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1_1:.*]] to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @[[OMP_OUTLINED_FN_5_1_1_1:.*]])
 // CHECK: define internal void @[[OMP_OUTLINED_FN_5_1_1_1]]
 // CHECK: call void @__kmpc_barrier
       omp.parallel {
@@ -328,14 +328,14 @@ llvm.func @test_omp_parallel_5() -> () {
 
 // CHECK-LABEL: define void @test_omp_master()
 llvm.func @test_omp_master() -> () {
-// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @{{.*}} to
+// CHECK: call void {{.*}}@__kmpc_fork_call{{.*}} @{{.*}})
 // CHECK: omp.par.region1:
   omp.parallel {
     omp.master {
-// CHECK: [[OMP_THREAD_3_4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{[0-9]+}})
-// CHECK: {{[0-9]+}} = call i32 @__kmpc_master(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
+// CHECK: [[OMP_THREAD_3_4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @{{[0-9]+}})
+// CHECK: {{[0-9]+}} = call i32 @__kmpc_master(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
 // CHECK: omp.master.region
-// CHECK: call void @__kmpc_end_master(%struct.ident_t* @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
+// CHECK: call void @__kmpc_end_master(ptr @{{[0-9]+}}, i32 [[OMP_THREAD_3_4]])
 // CHECK: br label %omp_region.end
       omp.terminator
     }
@@ -357,10 +357,10 @@ llvm.func @test_omp_master() -> () {
 
 // CHECK: %struct.ident_t = type
 // CHECK: @[[$parallel_loc:.*]] = private unnamed_addr constant {{.*}} c";LLVMDialectModule;wsloop_simple;{{[0-9]+}};{{[0-9]+}};;\00"
-// CHECK: @[[$parallel_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$parallel_loc]], {{.*}}
+// CHECK: @[[$parallel_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$parallel_loc]] {{.*}}
 
 // CHECK: @[[$wsloop_loc:.*]] = private unnamed_addr constant {{.*}} c";LLVMDialectModule;wsloop_simple;{{[0-9]+}};{{[0-9]+}};;\00"
-// CHECK: @[[$wsloop_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$wsloop_loc]], {{.*}}
+// CHECK: @[[$wsloop_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$wsloop_loc]] {{.*}}
 
 // CHECK-LABEL: @wsloop_simple
 llvm.func @wsloop_simple(%arg0: !llvm.ptr<f32>) {
@@ -373,12 +373,12 @@ llvm.func @wsloop_simple(%arg0: !llvm.ptr<f32>) {
       // The form of the emitted IR is controlled by OpenMPIRBuilder and
       // tested there. Just check that the right functions are called.
       // CHECK: call i32 @__kmpc_global_thread_num
-      // CHECK: call void @__kmpc_for_static_init_{{.*}}(%struct.ident_t* @[[$wsloop_loc_struct]],
+      // CHECK: call void @__kmpc_for_static_init_{{.*}}(ptr @[[$wsloop_loc_struct]],
       %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
       %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
       llvm.store %3, %4 : !llvm.ptr<f32>
       omp.yield
-      // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* @[[$wsloop_loc_struct]],
+      // CHECK: call void @__kmpc_for_static_fini(ptr @[[$wsloop_loc_struct]],
     }) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0]> : vector<7xi32>} : (i64, i64, i64) -> ()
     omp.terminator
   }
@@ -392,7 +392,7 @@ llvm.func @wsloop_inclusive_1(%arg0: !llvm.ptr<f32>) {
   %0 = llvm.mlir.constant(42 : index) : i64
   %1 = llvm.mlir.constant(10 : index) : i64
   %2 = llvm.mlir.constant(1 : index) : i64
-  // CHECK: store i64 31, i64* %{{.*}}upperbound
+  // CHECK: store i64 31, ptr %{{.*}}upperbound
   "omp.wsloop"(%1, %0, %2) ({
   ^bb0(%arg1: i64):
     %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
@@ -410,7 +410,7 @@ llvm.func @wsloop_inclusive_2(%arg0: !llvm.ptr<f32>) {
   %0 = llvm.mlir.constant(42 : index) : i64
   %1 = llvm.mlir.constant(10 : index) : i64
   %2 = llvm.mlir.constant(1 : index) : i64
-  // CHECK: store i64 32, i64* %{{.*}}upperbound
+  // CHECK: store i64 32, ptr %{{.*}}upperbound
   "omp.wsloop"(%1, %0, %2) ({
   ^bb0(%arg1: i64):
     %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
@@ -429,7 +429,7 @@ llvm.func @body(i32)
 llvm.func @test_omp_wsloop_static_defchunk(%lb : i32, %ub : i32, %step : i32) -> () {
  omp.wsloop schedule(static)
  for (%iv) : i32 = (%lb) to (%ub) step (%step) {
-   // CHECK: call void @__kmpc_for_static_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 34, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32 1, i32 0)
+   // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 34, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 0)
    // CHECK: call void @__kmpc_for_static_fini
    llvm.call @body(%iv) : (i32) -> ()
    omp.yield
@@ -446,7 +446,7 @@ llvm.func @test_omp_wsloop_static_1(%lb : i32, %ub : i32, %step : i32) -> () {
  %static_chunk_size = llvm.mlir.constant(1 : i32) : i32
  omp.wsloop schedule(static = %static_chunk_size : i32)
  for (%iv) : i32 = (%lb) to (%ub) step (%step) {
-   // CHECK: call void @__kmpc_for_static_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 33, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32 1, i32 1)
+   // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 33, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 1)
    // CHECK: call void @__kmpc_for_static_fini
    llvm.call @body(%iv) : (i32) -> ()
    omp.yield
@@ -463,7 +463,7 @@ llvm.func @test_omp_wsloop_static_2(%lb : i32, %ub : i32, %step : i32) -> () {
  %static_chunk_size = llvm.mlir.constant(2 : i32) : i32
  omp.wsloop schedule(static = %static_chunk_size : i32)
  for (%iv) : i32 = (%lb) to (%ub) step (%step) {
-   // CHECK: call void @__kmpc_for_static_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 33, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32 1, i32 2)
+   // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 33, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 2)
    // CHECK: call void @__kmpc_for_static_fini
    llvm.call @body(%iv) : (i32) -> ()
    omp.yield
@@ -496,7 +496,7 @@ llvm.func @test_omp_wsloop_dynamic_chunk_const(%lb : i64, %ub : i64, %step : i64
  %chunk_size_const = llvm.mlir.constant(2 : i16) : i16
  omp.wsloop schedule(dynamic = %chunk_size_const : i16)
  for (%iv) : i64 = (%lb) to (%ub) step (%step)  {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859, i64 {{.*}}, i64 %{{.*}}, i64 {{.*}}, i64 2)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i64 {{.*}}, i64 %{{.*}}, i64 {{.*}}, i64 2)
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
   // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
@@ -517,7 +517,7 @@ llvm.func @test_omp_wsloop_dynamic_chunk_var(%lb : i32, %ub : i32, %step : i32)
  omp.wsloop schedule(dynamic = %chunk_size_var : i16)
  for (%iv) : i32 = (%lb) to (%ub) step (%step) {
   // CHECK: %[[CHUNK_SIZE:.*]] = sext i16 %{{.*}} to i32
-  // CHECK: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]])
+  // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]])
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
   // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
@@ -538,7 +538,7 @@ llvm.func @test_omp_wsloop_dynamic_chunk_var2(%lb : i32, %ub : i32, %step : i32)
  omp.wsloop schedule(dynamic = %chunk_size_var : i64)
  for (%iv) : i32 = (%lb) to (%ub) step (%step) {
   // CHECK: %[[CHUNK_SIZE:.*]] = trunc i64 %{{.*}} to i32
-  // CHECK: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]])
+  // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]])
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
   // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
@@ -555,7 +555,7 @@ llvm.func @body(i32)
 llvm.func @test_omp_wsloop_dynamic_chunk_var3(%lb : i32, %ub : i32, %step : i32, %chunk_size : i32) -> () {
  omp.wsloop schedule(dynamic = %chunk_size : i32)
  for (%iv) : i32 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %{{.*}})
+  // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %{{.*}})
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
   // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
@@ -623,7 +623,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_dynamic_nonmonotonic(%lb : i64, %ub : i64, %step : i64) -> () {
   omp.wsloop schedule(dynamic, nonmonotonic)
   for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741859
+    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859
     // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
     // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
     // CHECK  br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
@@ -640,7 +640,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_dynamic_monotonic(%lb : i64, %ub : i64, %step : i64) -> () {
   omp.wsloop schedule(dynamic, monotonic)
   for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 536870947
+    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870947
     // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
     // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
     // CHECK  br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
@@ -657,7 +657,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_runtime_simd(%lb : i64, %ub : i64, %step : i64) -> () {
   omp.wsloop schedule(runtime, simd)
   for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741871
+    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741871
     // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
     // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
     // CHECK  br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
@@ -674,7 +674,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_guided_simd(%lb : i64, %ub : i64, %step : i64) -> () {
   omp.wsloop schedule(guided, simd)
   for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741870
+    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741870
     // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
     // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
     // CHECK  br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
@@ -733,7 +733,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
  omp.wsloop ordered(0)
  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
   // CHECK: call void @__kmpc_dispatch_fini_8u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -751,7 +751,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_static_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
  omp.wsloop schedule(static) ordered(0)
  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
   // CHECK: call void @__kmpc_dispatch_fini_8u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -770,7 +770,7 @@ llvm.func @test_omp_wsloop_static_chunk_ordered(%lb : i32, %ub : i32, %step : i3
  %static_chunk_size = llvm.mlir.constant(1 : i32) : i32
  omp.wsloop schedule(static = %static_chunk_size : i32) ordered(0)
  for (%iv) : i32 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 65, i32 1, i32 %{{.*}}, i32 1, i32 1)
+  // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 65, i32 1, i32 %{{.*}}, i32 1, i32 1)
   // CHECK: call void @__kmpc_dispatch_fini_4u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -788,7 +788,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_dynamic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
  omp.wsloop schedule(dynamic) ordered(0)
  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 67, i64 1, i64 %{{.*}}, i64 1, i64 1)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 67, i64 1, i64 %{{.*}}, i64 1, i64 1)
   // CHECK: call void @__kmpc_dispatch_fini_8u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -806,7 +806,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_auto_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
  omp.wsloop schedule(auto) ordered(0)
  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 70, i64 1, i64 %{{.*}}, i64 1, i64 1)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 70, i64 1, i64 %{{.*}}, i64 1, i64 1)
   // CHECK: call void @__kmpc_dispatch_fini_8u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -824,7 +824,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_runtime_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
  omp.wsloop schedule(runtime) ordered(0)
  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 69, i64 1, i64 %{{.*}}, i64 1, i64 1)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 69, i64 1, i64 %{{.*}}, i64 1, i64 1)
   // CHECK: call void @__kmpc_dispatch_fini_8u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -842,7 +842,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_guided_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
  omp.wsloop schedule(guided) ordered(0)
  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 68, i64 1, i64 %{{.*}}, i64 1, i64 1)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 68, i64 1, i64 %{{.*}}, i64 1, i64 1)
   // CHECK: call void @__kmpc_dispatch_fini_8u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -860,7 +860,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_dynamic_nonmonotonic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
  omp.wsloop schedule(dynamic, nonmonotonic) ordered(0)
  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 1073741891, i64 1, i64 %{{.*}}, i64 1, i64 1)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741891, i64 1, i64 %{{.*}}, i64 1, i64 1)
   // CHECK: call void @__kmpc_dispatch_fini_8u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -878,7 +878,7 @@ llvm.func @body(i64)
 llvm.func @test_omp_wsloop_dynamic_monotonic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
  omp.wsloop schedule(dynamic, monotonic) ordered(0)
  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-  // CHECK: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @{{.*}}, i32 %{{.*}}, i32 536870979, i64 1, i64 %{{.*}}, i64 1, i64 1)
+  // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870979, i64 1, i64 %{{.*}}, i64 1, i64 1)
   // CHECK: call void @__kmpc_dispatch_fini_8u
   // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
   // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
@@ -1012,8 +1012,8 @@ llvm.func @omp_critical(%x : !llvm.ptr<i32>, %xval : i32) -> () {
 // is done by the OpenMPIRBuilder.
 
 // CHECK-LABEL: @collapse_wsloop
-// CHECK: i32* noalias %[[TIDADDR:[0-9A-Za-z.]*]]
-// CHECK: load i32, i32* %[[TIDADDR]]
+// CHECK: ptr noalias %[[TIDADDR:[0-9A-Za-z.]*]]
+// CHECK: load i32, ptr %[[TIDADDR]]
 // CHECK: store
 // CHECK: load
 // CHECK: %[[LB0:.*]] = load i32
@@ -1048,9 +1048,9 @@ llvm.func @collapse_wsloop(
     // CHECK: br label %[[COLLAPSED_PREHEADER:.*]]
     //
     // CHECK: [[COLLAPSED_PREHEADER]]:
-    // CHECK: store i32 0, i32*
+    // CHECK: store i32 0, ptr
     // CHECK: %[[TOTAL_SUB_1:.*]] = sub i32 %[[TOTAL]], 1
-    // CHECK: store i32 %[[TOTAL_SUB_1]], i32*
+    // CHECK: store i32 %[[TOTAL_SUB_1]], ptr
     // CHECK: call void @__kmpc_for_static_init_4u
     omp.wsloop collapse(3)
     for (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
@@ -1073,8 +1073,8 @@ llvm.func @collapse_wsloop(
 // detailed checking is done by the OpenMPIRBuilder.
 
 // CHECK-LABEL: @collapse_wsloop_dynamic
-// CHECK: i32* noalias %[[TIDADDR:[0-9A-Za-z.]*]]
-// CHECK: load i32, i32* %[[TIDADDR]]
+// CHECK: ptr noalias %[[TIDADDR:[0-9A-Za-z.]*]]
+// CHECK: load i32, ptr %[[TIDADDR]]
 // CHECK: store
 // CHECK: load
 // CHECK: %[[LB0:.*]] = load i32
@@ -1110,8 +1110,8 @@ llvm.func @collapse_wsloop_dynamic(
     // CHECK: br label %[[COLLAPSED_PREHEADER:.*]]
     //
     // CHECK: [[COLLAPSED_PREHEADER]]:
-    // CHECK: store i32 1, i32*
-    // CHECK: store i32 %[[TOTAL]], i32*
+    // CHECK: store i32 1, ptr
+    // CHECK: store i32 %[[TOTAL]], ptr
     // CHECK: call void @__kmpc_dispatch_init_4u
     omp.wsloop collapse(3) schedule(dynamic)
     for (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
@@ -1138,37 +1138,37 @@ llvm.func @omp_ordered(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i64,
   // CHECK: [[ADDR3:%.*]] = alloca [1 x i64], align 8
   // CHECK: [[ADDR:%.*]] = alloca [1 x i64], align 8
 
-  // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
-  // CHECK-NEXT:  call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[OMP_THREAD]])
+  // CHECK: [[OMP_THREAD:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+  // CHECK-NEXT:  call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_THREAD]])
   omp.ordered_region {
     omp.terminator
-  // CHECK: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[OMP_THREAD]])
+  // CHECK: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_THREAD]])
   }
 
   omp.wsloop ordered(0)
   for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
-    // CHECK:  call void @__kmpc_ordered(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[OMP_THREAD2:%.*]])
+    // CHECK:  call void @__kmpc_ordered(ptr @[[GLOB3:[0-9]+]], i32 [[OMP_THREAD2:%.*]])
     omp.ordered_region  {
       omp.terminator
-    // CHECK: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB3]], i32 [[OMP_THREAD2]])
+    // CHECK: call void @__kmpc_end_ordered(ptr @[[GLOB3]], i32 [[OMP_THREAD2]])
     }
     omp.yield
   }
 
   omp.wsloop ordered(1)
   for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
-    // CHECK: [[TMP:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[ADDR]], i64 0, i64 0
-    // CHECK: store i64 [[ARG0:%.*]], i64* [[TMP]], align 8
-    // CHECK: [[TMP2:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[ADDR]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
-    // CHECK: call void @__kmpc_doacross_wait(%struct.ident_t* @[[GLOB3]], i32 [[OMP_THREAD2]], i64* [[TMP2]])
+    // CHECK: [[TMP:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR]], i64 0, i64 0
+    // CHECK: store i64 [[ARG0:%.*]], ptr [[TMP]], align 8
+    // CHECK: [[TMP2:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR]], i64 0, i64 0
+    // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
+    // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB3]], i32 [[OMP_THREAD2]], ptr [[TMP2]])
     omp.ordered depend_type(dependsink) depend_vec(%arg3 : i64) {num_loops_val = 1 : i64}
 
-    // CHECK: [[TMP3:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[ADDR3]], i64 0, i64 0
-    // CHECK: store i64 [[ARG0]], i64* [[TMP3]], align 8
-    // CHECK: [[TMP4:%.*]] = getelementptr inbounds [1 x i64], [1 x i64]* [[ADDR3]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB5:[0-9]+]])
-    // CHECK: call void @__kmpc_doacross_post(%struct.ident_t* @[[GLOB5]], i32 [[OMP_THREAD4]], i64* [[TMP4]])
+    // CHECK: [[TMP3:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR3]], i64 0, i64 0
+    // CHECK: store i64 [[ARG0]], ptr [[TMP3]], align 8
+    // CHECK: [[TMP4:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR3]], i64 0, i64 0
+    // CHECK: [[OMP_THREAD4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
+    // CHECK: call void @__kmpc_doacross_post(ptr @[[GLOB5]], i32 [[OMP_THREAD4]], ptr [[TMP4]])
     omp.ordered depend_type(dependsource) depend_vec(%arg3 : i64) {num_loops_val = 1 : i64}
 
     omp.yield
@@ -1176,29 +1176,29 @@ llvm.func @omp_ordered(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i64,
 
   omp.wsloop ordered(2)
   for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
-    // CHECK: [[TMP5:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR5]], i64 0, i64 0
-    // CHECK: store i64 [[ARG0]], i64* [[TMP5]], align 8
-    // CHECK: [[TMP6:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR5]], i64 0, i64 1
-    // CHECK: store i64 [[ARG1:%.*]], i64* [[TMP6]], align 8
-    // CHECK: [[TMP7:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR5]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD6:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB7:[0-9]+]])
-    // CHECK: call void @__kmpc_doacross_wait(%struct.ident_t* @[[GLOB7]], i32 [[OMP_THREAD6]], i64* [[TMP7]])
-    // CHECK: [[TMP8:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR7]], i64 0, i64 0
-    // CHECK: store i64 [[ARG2:%.*]], i64* [[TMP8]], align 8
-    // CHECK: [[TMP9:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR7]], i64 0, i64 1
-    // CHECK: store i64 [[ARG3:%.*]], i64* [[TMP9]], align 8
-    // CHECK: [[TMP10:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR7]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD8:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB7]])
-    // CHECK: call void @__kmpc_doacross_wait(%struct.ident_t* @[[GLOB7]], i32 [[OMP_THREAD8]], i64* [[TMP10]])
+    // CHECK: [[TMP5:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 0
+    // CHECK: store i64 [[ARG0]], ptr [[TMP5]], align 8
+    // CHECK: [[TMP6:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 1
+    // CHECK: store i64 [[ARG1:%.*]], ptr [[TMP6]], align 8
+    // CHECK: [[TMP7:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 0
+    // CHECK: [[OMP_THREAD6:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]])
+    // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB7]], i32 [[OMP_THREAD6]], ptr [[TMP7]])
+    // CHECK: [[TMP8:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 0
+    // CHECK: store i64 [[ARG2:%.*]], ptr [[TMP8]], align 8
+    // CHECK: [[TMP9:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 1
+    // CHECK: store i64 [[ARG3:%.*]], ptr [[TMP9]], align 8
+    // CHECK: [[TMP10:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 0
+    // CHECK: [[OMP_THREAD8:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7]])
+    // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB7]], i32 [[OMP_THREAD8]], ptr [[TMP10]])
     omp.ordered depend_type(dependsink) depend_vec(%arg3, %arg4, %arg5, %arg6 : i64, i64, i64, i64) {num_loops_val = 2 : i64}
 
-    // CHECK: [[TMP11:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR9]], i64 0, i64 0
-    // CHECK: store i64 [[ARG0]], i64* [[TMP11]], align 8
-    // CHECK: [[TMP12:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR9]], i64 0, i64 1
-    // CHECK: store i64 [[ARG1]], i64* [[TMP12]], align 8
-    // CHECK: [[TMP13:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[ADDR9]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD10:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB9:[0-9]+]])
-    // CHECK: call void @__kmpc_doacross_post(%struct.ident_t* @[[GLOB9]], i32 [[OMP_THREAD10]], i64* [[TMP13]])
+    // CHECK: [[TMP11:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 0
+    // CHECK: store i64 [[ARG0]], ptr [[TMP11]], align 8
+    // CHECK: [[TMP12:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 1
+    // CHECK: store i64 [[ARG1]], ptr [[TMP12]], align 8
+    // CHECK: [[TMP13:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 0
+    // CHECK: [[OMP_THREAD10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]])
+    // CHECK: call void @__kmpc_doacross_post(ptr @[[GLOB9]], i32 [[OMP_THREAD10]], ptr [[TMP13]])
     omp.ordered depend_type(dependsource) depend_vec(%arg3, %arg4 : i64, i64) {num_loops_val = 2 : i64}
 
     omp.yield
@@ -1210,25 +1210,25 @@ llvm.func @omp_ordered(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i64,
 // -----
 
 // CHECK-LABEL: @omp_atomic_read
-// CHECK-SAME: (i32* %[[ARG0:.*]], i32* %[[ARG1:.*]])
+// CHECK-SAME: (ptr %[[ARG0:.*]], ptr %[[ARG1:.*]])
 llvm.func @omp_atomic_read(%arg0 : !llvm.ptr<i32>, %arg1 : !llvm.ptr<i32>) -> () {
 
-  // CHECK: %[[X1:.*]] = load atomic i32, i32* %[[ARG0]] monotonic, align 4
-  // CHECK: store i32 %[[X1]], i32* %[[ARG1]], align 4
+  // CHECK: %[[X1:.*]] = load atomic i32, ptr %[[ARG0]] monotonic, align 4
+  // CHECK: store i32 %[[X1]], ptr %[[ARG1]], align 4
   omp.atomic.read %arg1 = %arg0 : !llvm.ptr<i32>
 
-  // CHECK: %[[X2:.*]] = load atomic i32, i32* %[[ARG0]] seq_cst, align 4
-  // CHECK: call void @__kmpc_flush(%{{.*}})
-  // CHECK: store i32 %[[X2]], i32* %[[ARG1]], align 4
+  // CHECK: %[[X2:.*]] = load atomic i32, ptr %[[ARG0]] seq_cst, align 4
+  // CHECK: call void @__kmpc_flush(ptr @{{.*}})
+  // CHECK: store i32 %[[X2]], ptr %[[ARG1]], align 4
   omp.atomic.read %arg1 = %arg0 memory_order(seq_cst) : !llvm.ptr<i32>
 
-  // CHECK: %[[X3:.*]] = load atomic i32, i32* %[[ARG0]] acquire, align 4
-  // CHECK: call void @__kmpc_flush(%{{.*}})
-  // CHECK: store i32 %[[X3]], i32* %[[ARG1]], align 4
+  // CHECK: %[[X3:.*]] = load atomic i32, ptr %[[ARG0]] acquire, align 4
+  // CHECK: call void @__kmpc_flush(ptr @{{.*}})
+  // CHECK: store i32 %[[X3]], ptr %[[ARG1]], align 4
   omp.atomic.read %arg1 = %arg0 memory_order(acquire) : !llvm.ptr<i32>
 
-  // CHECK: %[[X4:.*]] = load atomic i32, i32* %[[ARG0]] monotonic, align 4
-  // CHECK: store i32 %[[X4]], i32* %[[ARG1]], align 4
+  // CHECK: %[[X4:.*]] = load atomic i32, ptr %[[ARG0]] monotonic, align 4
+  // CHECK: store i32 %[[X4]], ptr %[[ARG1]], align 4
   omp.atomic.read %arg1 = %arg0 memory_order(relaxed) : !llvm.ptr<i32>
   llvm.return
 }
@@ -1236,17 +1236,17 @@ llvm.func @omp_atomic_read(%arg0 : !llvm.ptr<i32>, %arg1 : !llvm.ptr<i32>) -> ()
 // -----
 
 // CHECK-LABEL: @omp_atomic_write
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])
 llvm.func @omp_atomic_write(%x: !llvm.ptr<i32>, %expr: i32) -> () {
-  // CHECK: store atomic i32 %[[expr]], i32* %[[x]] monotonic, align 4
+  // CHECK: store atomic i32 %[[expr]], ptr %[[x]] monotonic, align 4
   omp.atomic.write %x = %expr : !llvm.ptr<i32>, i32
-  // CHECK: store atomic i32 %[[expr]], i32* %[[x]] seq_cst, align 4
-  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{.*}})
+  // CHECK: store atomic i32 %[[expr]], ptr %[[x]] seq_cst, align 4
+  // CHECK: call void @__kmpc_flush(ptr @{{.*}})
   omp.atomic.write %x = %expr memory_order(seq_cst) : !llvm.ptr<i32>, i32
-  // CHECK: store atomic i32 %[[expr]], i32* %[[x]] release, align 4
-  // CHECK: call void @__kmpc_flush(%struct.ident_t* @{{.*}})
+  // CHECK: store atomic i32 %[[expr]], ptr %[[x]] release, align 4
+  // CHECK: call void @__kmpc_flush(ptr @{{.*}})
   omp.atomic.write %x = %expr memory_order(release) : !llvm.ptr<i32>, i32
-  // CHECK: store atomic i32 %[[expr]], i32* %[[x]] monotonic, align 4
+  // CHECK: store atomic i32 %[[expr]], ptr %[[x]] monotonic, align 4
   omp.atomic.write %x = %expr memory_order(relaxed) : !llvm.ptr<i32>, i32
   llvm.return
 }
@@ -1256,18 +1256,18 @@ llvm.func @omp_atomic_write(%x: !llvm.ptr<i32>, %expr: i32) -> () {
 // Checking simple atomicrmw and cmpxchg based translation. This also checks for
 // ambigous alloca insert point by putting llvm.mul as the first update operation.
 // CHECK-LABEL: @omp_atomic_update
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]], i1* %[[xbool:.*]], i1 %[[exprbool:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]], ptr %[[xbool:.*]], i1 %[[exprbool:.*]])
 llvm.func @omp_atomic_update(%x:!llvm.ptr<i32>, %expr: i32, %xbool: !llvm.ptr<i1>, %exprbool: i1) {
   // CHECK: %[[t1:.*]] = mul i32 %[[x_old:.*]], %[[expr]]
-  // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
-  // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
-  // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]]
+  // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+  // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+  // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]]
   omp.atomic.update %x : !llvm.ptr<i32> {
   ^bb0(%xval: i32):
     %newval = llvm.mul %xval, %expr : i32
     omp.yield(%newval : i32)
   }
-  // CHECK: atomicrmw add i32* %[[x]], i32 %[[expr]] monotonic
+  // CHECK: atomicrmw add ptr %[[x]], i32 %[[expr]] monotonic
   omp.atomic.update %x : !llvm.ptr<i32> {
   ^bb0(%xval: i32):
     %newval = llvm.add %xval, %expr : i32
@@ -1280,12 +1280,12 @@ llvm.func @omp_atomic_update(%x:!llvm.ptr<i32>, %expr: i32, %xbool: !llvm.ptr<i1
 
 // Checking an order-dependent operation when the order is `expr binop x`
 // CHECK-LABEL: @omp_atomic_update_ordering
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])
 llvm.func @omp_atomic_update_ordering(%x:!llvm.ptr<i32>, %expr: i32) {
   // CHECK: %[[t1:.*]] = shl i32 %[[expr]], %[[x_old:[^ ,]*]]
-  // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
-  // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
-  // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]]
+  // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+  // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+  // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]]
   omp.atomic.update %x : !llvm.ptr<i32> {
   ^bb0(%xval: i32):
     %newval = llvm.shl %expr, %xval : i32
@@ -1298,12 +1298,12 @@ llvm.func @omp_atomic_update_ordering(%x:!llvm.ptr<i32>, %expr: i32) {
 
 // Checking an order-dependent operation when the order is `x binop expr`
 // CHECK-LABEL: @omp_atomic_update_ordering
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])
 llvm.func @omp_atomic_update_ordering(%x:!llvm.ptr<i32>, %expr: i32) {
   // CHECK: %[[t1:.*]] = shl i32 %[[x_old:.*]], %[[expr]]
-  // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
-  // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
-  // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]] monotonic
+  // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+  // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+  // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]] monotonic
   omp.atomic.update %x : !llvm.ptr<i32> {
   ^bb0(%xval: i32):
     %newval = llvm.shl %xval, %expr : i32
@@ -1316,21 +1316,21 @@ llvm.func @omp_atomic_update_ordering(%x:!llvm.ptr<i32>, %expr: i32) {
 
 // Checking intrinsic translation.
 // CHECK-LABEL: @omp_atomic_update_intrinsic
-// CHECK-SAME: (i32* %[[x:.*]], i32 %[[expr:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])
 llvm.func @omp_atomic_update_intrinsic(%x:!llvm.ptr<i32>, %expr: i32) {
   // CHECK: %[[t1:.*]] = call i32 @llvm.smax.i32(i32 %[[x_old:.*]], i32 %[[expr]])
-  // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
-  // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
-  // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]]
+  // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+  // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+  // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]]
   omp.atomic.update %x : !llvm.ptr<i32> {
   ^bb0(%xval: i32):
     %newval = "llvm.intr.smax"(%xval, %expr) : (i32, i32) -> i32
     omp.yield(%newval : i32)
   }
   // CHECK: %[[t1:.*]] = call i32 @llvm.umax.i32(i32 %[[x_old:.*]], i32 %[[expr]])
-  // CHECK: store i32 %[[t1]], i32* %[[x_new:.*]]
-  // CHECK: %[[t2:.*]] = load i32, i32* %[[x_new]]
-  // CHECK: cmpxchg i32* %[[x]], i32 %[[x_old]], i32 %[[t2]]
+  // CHECK: store i32 %[[t1]], ptr %[[x_new:.*]]
+  // CHECK: %[[t2:.*]] = load i32, ptr %[[x_new]]
+  // CHECK: cmpxchg ptr %[[x]], i32 %[[x_old]], i32 %[[t2]]
   omp.atomic.update %x : !llvm.ptr<i32> {
   ^bb0(%xval: i32):
     %newval = "llvm.intr.umax"(%xval, %expr) : (i32, i32) -> i32
@@ -1342,13 +1342,13 @@ llvm.func @omp_atomic_update_intrinsic(%x:!llvm.ptr<i32>, %expr: i32) {
 // -----
 
 // CHECK-LABEL: @omp_atomic_capture_prefix_update
-// CHECK-SAME: (i32* %[[x:.*]], i32* %[[v:.*]], i32 %[[expr:.*]], float* %[[xf:.*]], float* %[[vf:.*]], float %[[exprf:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], ptr %[[v:.*]], i32 %[[expr:.*]], ptr %[[xf:.*]], ptr %[[vf:.*]], float %[[exprf:.*]])
 llvm.func @omp_atomic_capture_prefix_update(
   %x: !llvm.ptr<i32>, %v: !llvm.ptr<i32>, %expr: i32,
   %xf: !llvm.ptr<f32>, %vf: !llvm.ptr<f32>, %exprf: f32) -> () {
-  // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] monotonic
+  // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] monotonic
   // CHECK-NEXT: %[[newval:.*]] = add i32 %[[res]], %[[expr]]
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1358,9 +1358,9 @@ llvm.func @omp_atomic_capture_prefix_update(
     omp.atomic.read %v = %x : !llvm.ptr<i32>
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw sub i32* %[[x]], i32 %[[expr]] monotonic
+  // CHECK: %[[res:.*]] = atomicrmw sub ptr %[[x]], i32 %[[expr]] monotonic
   // CHECK-NEXT: %[[newval:.*]] = sub i32 %[[res]], %[[expr]]
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1370,9 +1370,9 @@ llvm.func @omp_atomic_capture_prefix_update(
     omp.atomic.read %v = %x : !llvm.ptr<i32>
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw and i32* %[[x]], i32 %[[expr]] monotonic
+  // CHECK: %[[res:.*]] = atomicrmw and ptr %[[x]], i32 %[[expr]] monotonic
   // CHECK-NEXT: %[[newval:.*]] = and i32 %[[res]], %[[expr]]
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1382,9 +1382,9 @@ llvm.func @omp_atomic_capture_prefix_update(
     omp.atomic.read %v = %x : !llvm.ptr<i32>
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw or i32* %[[x]], i32 %[[expr]] monotonic
+  // CHECK: %[[res:.*]] = atomicrmw or ptr %[[x]], i32 %[[expr]] monotonic
   // CHECK-NEXT: %[[newval:.*]] = or i32 %[[res]], %[[expr]]
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1394,9 +1394,9 @@ llvm.func @omp_atomic_capture_prefix_update(
     omp.atomic.read %v = %x : !llvm.ptr<i32>
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw xor i32* %[[x]], i32 %[[expr]] monotonic
+  // CHECK: %[[res:.*]] = atomicrmw xor ptr %[[x]], i32 %[[expr]] monotonic
   // CHECK-NEXT: %[[newval:.*]] = xor i32 %[[res]], %[[expr]]
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1408,10 +1408,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = mul i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1423,10 +1423,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = sdiv i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1438,10 +1438,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = udiv i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1453,10 +1453,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = shl i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1468,10 +1468,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = lshr i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1483,10 +1483,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = ashr i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1498,10 +1498,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.smax.i32(i32 %[[xval]], i32 %[[expr]])
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1513,10 +1513,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.smin.i32(i32 %[[xval]], i32 %[[expr]])
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1528,10 +1528,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.umax.i32(i32 %[[xval]], i32 %[[expr]])
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1543,10 +1543,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.umin.i32(i32 %[[xval]], i32 %[[expr]])
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[newval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[newval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.update %x : !llvm.ptr<i32> {
     ^bb0(%xval: i32):
@@ -1558,11 +1558,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK: %[[newval:.*]] = fadd float %{{.*}}, %[[exprf]]
-  // CHECK: store float %[[newval]], float* %{{.*}}
-  // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
-  // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store float %[[newval]], float* %[[vf]]
+  // CHECK: store float %[[newval]], ptr %{{.*}}
+  // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store float %[[newval]], ptr %[[vf]]
   omp.atomic.capture {
     omp.atomic.update %xf : !llvm.ptr<f32> {
     ^bb0(%xval: f32):
@@ -1574,11 +1573,10 @@ llvm.func @omp_atomic_capture_prefix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK: %[[newval:.*]] = fsub float %{{.*}}, %[[exprf]]
-  // CHECK: store float %[[newval]], float* %{{.*}}
-  // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
-  // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store float %[[newval]], float* %[[vf]]
+  // CHECK: store float %[[newval]], ptr %{{.*}}
+  // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store float %[[newval]], ptr %[[vf]]
   omp.atomic.capture {
     omp.atomic.update %xf : !llvm.ptr<f32> {
     ^bb0(%xval: f32):
@@ -1594,12 +1592,12 @@ llvm.func @omp_atomic_capture_prefix_update(
 // -----
 
 // CHECK-LABEL: @omp_atomic_capture_postfix_update
-// CHECK-SAME: (i32* %[[x:.*]], i32* %[[v:.*]], i32 %[[expr:.*]], float* %[[xf:.*]], float* %[[vf:.*]], float %[[exprf:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], ptr %[[v:.*]], i32 %[[expr:.*]], ptr %[[xf:.*]], ptr %[[vf:.*]], float %[[exprf:.*]])
 llvm.func @omp_atomic_capture_postfix_update(
   %x: !llvm.ptr<i32>, %v: !llvm.ptr<i32>, %expr: i32,
   %xf: !llvm.ptr<f32>, %vf: !llvm.ptr<f32>, %exprf: f32) -> () {
-  // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] monotonic
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] monotonic
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1609,8 +1607,8 @@ llvm.func @omp_atomic_capture_postfix_update(
     }
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw sub i32* %[[x]], i32 %[[expr]] monotonic
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw sub ptr %[[x]], i32 %[[expr]] monotonic
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1620,8 +1618,8 @@ llvm.func @omp_atomic_capture_postfix_update(
     }
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw and i32* %[[x]], i32 %[[expr]] monotonic
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw and ptr %[[x]], i32 %[[expr]] monotonic
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1631,8 +1629,8 @@ llvm.func @omp_atomic_capture_postfix_update(
     }
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw or i32* %[[x]], i32 %[[expr]] monotonic
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw or ptr %[[x]], i32 %[[expr]] monotonic
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1642,8 +1640,8 @@ llvm.func @omp_atomic_capture_postfix_update(
     }
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw xor i32* %[[x]], i32 %[[expr]] monotonic
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw xor ptr %[[x]], i32 %[[expr]] monotonic
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1655,10 +1653,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = mul i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1670,10 +1668,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = sdiv i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1685,10 +1683,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = udiv i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1700,10 +1698,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = shl i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1715,10 +1713,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = lshr i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1730,10 +1728,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = ashr i32 %[[xval]], %[[expr]]
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1745,10 +1743,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.smax.i32(i32 %[[xval]], i32 %[[expr]])
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1760,10 +1758,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.smin.i32(i32 %[[xval]], i32 %[[expr]])
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1775,10 +1773,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.umax.i32(i32 %[[xval]], i32 %[[expr]])
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1790,10 +1788,10 @@ llvm.func @omp_atomic_capture_postfix_update(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK-NEXT: %[[newval:.*]] = call i32 @llvm.umin.i32(i32 %[[xval]], i32 %[[expr]])
-  // CHECK-NEXT: store i32 %[[newval]], i32* %{{.*}}
-  // CHECK-NEXT: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK-NEXT: %{{.*}} = cmpxchg i32* %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK-NEXT: store i32 %[[newval]], ptr %{{.*}}
+  // CHECK-NEXT: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK-NEXT: %{{.*}} = cmpxchg ptr %[[x]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1806,11 +1804,10 @@ llvm.func @omp_atomic_capture_postfix_update(
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK: %[[xvalf:.*]] = bitcast i32 %[[xval]] to float
   // CHECK: %[[newval:.*]] = fadd float %{{.*}}, %[[exprf]]
-  // CHECK: store float %[[newval]], float* %{{.*}}
-  // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
-  // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store float %[[xvalf]], float* %[[vf]]
+  // CHECK: store float %[[newval]], ptr %{{.*}}
+  // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store float %[[xvalf]], ptr %[[vf]]
   omp.atomic.capture {
     omp.atomic.read %vf = %xf : !llvm.ptr<f32>
     omp.atomic.update %xf : !llvm.ptr<f32> {
@@ -1823,11 +1820,10 @@ llvm.func @omp_atomic_capture_postfix_update(
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK: %[[xvalf:.*]] = bitcast i32 %[[xval]] to float
   // CHECK: %[[newval:.*]] = fsub float %{{.*}}, %[[exprf]]
-  // CHECK: store float %[[newval]], float* %{{.*}}
-  // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
-  // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store float %[[xvalf]], float* %[[vf]]
+  // CHECK: store float %[[newval]], ptr %{{.*}}
+  // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store float %[[xvalf]], ptr %[[vf]]
   omp.atomic.capture {
     omp.atomic.read %vf = %xf : !llvm.ptr<f32>
     omp.atomic.update %xf : !llvm.ptr<f32> {
@@ -1842,12 +1838,12 @@ llvm.func @omp_atomic_capture_postfix_update(
 
 // -----
 // CHECK-LABEL: @omp_atomic_capture_misc
-// CHECK-SAME: (i32* %[[x:.*]], i32* %[[v:.*]], i32 %[[expr:.*]], float* %[[xf:.*]], float* %[[vf:.*]], float %[[exprf:.*]])
+// CHECK-SAME: (ptr %[[x:.*]], ptr %[[v:.*]], i32 %[[expr:.*]], ptr %[[xf:.*]], ptr %[[vf:.*]], float %[[exprf:.*]])
 llvm.func @omp_atomic_capture_misc(
   %x: !llvm.ptr<i32>, %v: !llvm.ptr<i32>, %expr: i32,
   %xf: !llvm.ptr<f32>, %vf: !llvm.ptr<f32>, %exprf: f32) -> () {
-  // CHECK: %[[xval:.*]] = atomicrmw xchg i32* %[[x]], i32 %[[expr]] monotonic
-  // CHECK: store i32 %[[xval]], i32* %[[v]]
+  // CHECK: %[[xval:.*]] = atomicrmw xchg ptr %[[x]], i32 %[[expr]] monotonic
+  // CHECK: store i32 %[[xval]], ptr %[[v]]
   omp.atomic.capture{
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.write %x = %expr : !llvm.ptr<i32>, i32
@@ -1855,18 +1851,17 @@ llvm.func @omp_atomic_capture_misc(
 
   // CHECK: %[[xval:.*]] = phi i32
   // CHECK: %[[xvalf:.*]] = bitcast i32 %[[xval]] to float
-  // CHECK: store float %[[exprf]], float* %{{.*}}
-  // CHECK: %[[newval_:.*]] = load i32, i32* %{{.*}}
-  // CHECK: %[[xf_bitcast:.*]] = bitcast float* %[[xf]] to i32*
-  // CHECK: %{{.*}} = cmpxchg i32* %[[xf_bitcast]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
-  // CHECK: store float %[[xvalf]], float* %[[vf]]
+  // CHECK: store float %[[exprf]], ptr %{{.*}}
+  // CHECK: %[[newval_:.*]] = load i32, ptr %{{.*}}
+  // CHECK: %{{.*}} = cmpxchg ptr %[[xf]], i32 %[[xval]], i32 %[[newval_]] monotonic monotonic
+  // CHECK: store float %[[xvalf]], ptr %[[vf]]
   omp.atomic.capture{
     omp.atomic.read %vf = %xf : !llvm.ptr<f32>
     omp.atomic.write %xf = %exprf : !llvm.ptr<f32>, f32
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] seq_cst
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] seq_cst
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture memory_order(seq_cst) {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1876,8 +1871,8 @@ llvm.func @omp_atomic_capture_misc(
     }
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] acquire
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] acquire
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture memory_order(acquire) {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1887,8 +1882,8 @@ llvm.func @omp_atomic_capture_misc(
     }
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] release
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] release
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture memory_order(release) {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1898,8 +1893,8 @@ llvm.func @omp_atomic_capture_misc(
     }
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] monotonic
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] monotonic
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture memory_order(relaxed) {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -1909,8 +1904,8 @@ llvm.func @omp_atomic_capture_misc(
     }
   }
 
-  // CHECK: %[[res:.*]] = atomicrmw add i32* %[[x]], i32 %[[expr]] acq_rel
-  // CHECK: store i32 %[[res]], i32* %[[v]]
+  // CHECK: %[[res:.*]] = atomicrmw add ptr %[[x]], i32 %[[expr]] acq_rel
+  // CHECK: store i32 %[[res]], ptr %[[v]]
   omp.atomic.capture memory_order(acq_rel) {
     omp.atomic.read %v = %x : !llvm.ptr<i32>
     omp.atomic.update %x : !llvm.ptr<i32> {
@@ -2037,7 +2032,7 @@ llvm.func @omp_sections(%arg0 : i32, %arg1 : i32, %arg2 : !llvm.ptr<i32>) -> ()
       // CHECK: [[REGION3]]:
       // CHECK:   %11 = add i32 %{{.*}}, %{{.*}}
       %add = llvm.add %arg0, %arg1 : i32
-      // CHECK:   store i32 %{{.*}}, i32* %{{.*}}, align 4
+      // CHECK:   store i32 %{{.*}}, ptr %{{.*}}, align 4
       // CHECK:   br label %{{.*}}
       llvm.store %add, %arg2 : !llvm.ptr<i32>
       omp.terminator
@@ -2087,17 +2082,17 @@ llvm.func @repeated_successor(%arg0: i64, %arg1: i64, %arg2: i64, %arg3: i1) {
 // -----
 
 // CHECK-LABEL: @single
-// CHECK-SAME: (i32 %[[x:.*]], i32 %[[y:.*]], i32* %[[zaddr:.*]])
+// CHECK-SAME: (i32 %[[x:.*]], i32 %[[y:.*]], ptr %[[zaddr:.*]])
 llvm.func @single(%x: i32, %y: i32, %zaddr: !llvm.ptr<i32>) {
   // CHECK: %[[a:.*]] = sub i32 %[[x]], %[[y]]
   %a = llvm.sub %x, %y : i32
-  // CHECK: store i32 %[[a]], i32* %[[zaddr]]
+  // CHECK: store i32 %[[a]], ptr %[[zaddr]]
   llvm.store %a, %zaddr : !llvm.ptr<i32>
   // CHECK: call i32 @__kmpc_single
   omp.single {
     // CHECK: %[[z:.*]] = add i32 %[[x]], %[[y]]
     %z = llvm.add %x, %y : i32
-    // CHECK: store i32 %[[z]], i32* %[[zaddr]]
+    // CHECK: store i32 %[[z]], ptr %[[zaddr]]
     llvm.store %z, %zaddr : !llvm.ptr<i32>
     // CHECK: call void @__kmpc_end_single
     // CHECK: call void @__kmpc_barrier
@@ -2105,7 +2100,7 @@ llvm.func @single(%x: i32, %y: i32, %zaddr: !llvm.ptr<i32>) {
   }
   // CHECK: %[[b:.*]] = mul i32 %[[x]], %[[y]]
   %b = llvm.mul %x, %y : i32
-  // CHECK: store i32 %[[b]], i32* %[[zaddr]]
+  // CHECK: store i32 %[[b]], ptr %[[zaddr]]
   llvm.store %b, %zaddr : !llvm.ptr<i32>
   // CHECK: ret void
   llvm.return
@@ -2114,17 +2109,17 @@ llvm.func @single(%x: i32, %y: i32, %zaddr: !llvm.ptr<i32>) {
 // -----
 
 // CHECK-LABEL: @single_nowait
-// CHECK-SAME: (i32 %[[x:.*]], i32 %[[y:.*]], i32* %[[zaddr:.*]])
+// CHECK-SAME: (i32 %[[x:.*]], i32 %[[y:.*]], ptr %[[zaddr:.*]])
 llvm.func @single_nowait(%x: i32, %y: i32, %zaddr: !llvm.ptr<i32>) {
   // CHECK: %[[a:.*]] = sub i32 %[[x]], %[[y]]
   %a = llvm.sub %x, %y : i32
-  // CHECK: store i32 %[[a]], i32* %[[zaddr]]
+  // CHECK: store i32 %[[a]], ptr %[[zaddr]]
   llvm.store %a, %zaddr : !llvm.ptr<i32>
   // CHECK: call i32 @__kmpc_single
   omp.single nowait {
     // CHECK: %[[z:.*]] = add i32 %[[x]], %[[y]]
     %z = llvm.add %x, %y : i32
-    // CHECK: store i32 %[[z]], i32* %[[zaddr]]
+    // CHECK: store i32 %[[z]], ptr %[[zaddr]]
     llvm.store %z, %zaddr : !llvm.ptr<i32>
     // CHECK: call void @__kmpc_end_single
     // CHECK-NOT: call void @__kmpc_barrier
@@ -2132,7 +2127,7 @@ llvm.func @single_nowait(%x: i32, %y: i32, %zaddr: !llvm.ptr<i32>) {
   }
   // CHECK: %[[t:.*]] = mul i32 %[[x]], %[[y]]
   %t = llvm.mul %x, %y : i32
-  // CHECK: store i32 %[[t]], i32* %[[zaddr]]
+  // CHECK: store i32 %[[t]], ptr %[[zaddr]]
   llvm.store %t, %zaddr : !llvm.ptr<i32>
   // CHECK: ret void
   llvm.return
@@ -2141,21 +2136,19 @@ llvm.func @single_nowait(%x: i32, %y: i32, %zaddr: !llvm.ptr<i32>) {
 // -----
 
 // CHECK: @_QFsubEx = internal global i32 undef
-// CHECK: @_QFsubEx.cache = common global i8** null
+// CHECK: @_QFsubEx.cache = common global ptr null
 
 // CHECK-LABEL: @omp_threadprivate
 llvm.func @omp_threadprivate() {
-// CHECK:  [[THREAD:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB:[0-9]+]])
-// CHECK:  [[TMP1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB]], i32 [[THREAD]], i8* bitcast (i32* @_QFsubEx to i8*), i64 4, i8*** @_QFsubEx.cache)
-// CHECK:  [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
-// CHECK:  store i32 1, i32* [[TMP2]], align 4
-// CHECK:  store i32 3, i32* [[TMP2]], align 4
+// CHECK:  [[THREAD:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB:[0-9]+]])
+// CHECK:  [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB]], i32 [[THREAD]], ptr @_QFsubEx, i64 4, ptr @_QFsubEx.cache)
+// CHECK:  store i32 1, ptr [[TMP1]], align 4
+// CHECK:  store i32 3, ptr [[TMP1]], align 4
 
 // CHECK-LABEL: omp.par.region{{.*}}
-// CHECK:  [[THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
-// CHECK:  [[TMP3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2]], i32 [[THREAD2]], i8* bitcast (i32* @_QFsubEx to i8*), i64 4, i8*** @_QFsubEx.cache)
-// CHECK:  [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i32*
-// CHECK:  store i32 2, i32* [[TMP4]], align 4
+// CHECK:  [[THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
+// CHECK:  [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB2]], i32 [[THREAD2]], ptr @_QFsubEx, i64 4, ptr @_QFsubEx.cache)
+// CHECK:  store i32 2, ptr [[TMP3]], align 4
 
   %0 = llvm.mlir.constant(1 : i32) : i32
   %1 = llvm.mlir.constant(2 : i32) : i32

diff  --git a/mlir/test/Target/LLVMIR/openmp-nested.mlir b/mlir/test/Target/LLVMIR/openmp-nested.mlir
index 414c70663b503..3c4c0770dc270 100644
--- a/mlir/test/Target/LLVMIR/openmp-nested.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-nested.mlir
@@ -34,8 +34,8 @@ module {
 
 }
 
-// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @1, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @[[inner1:.+]] to void (i32*, i32*, ...)*))
+// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 0, ptr @[[inner1:.+]])
 
 // CHECK: define internal void @[[inner1]]
-// CHECK: %[[structArg:.+]] = alloca { i64* }
-// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @3, i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { i64* }*)* @[[inner2:.+]] to void (i32*, i32*, ...)*), { i64* }* %[[structArg]])
+// CHECK: %[[structArg:.+]] = alloca { ptr }
+// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @3, i32 1, ptr @[[inner2:.+]], ptr %[[structArg]])

diff  --git a/mlir/test/Target/LLVMIR/openmp-reduction.mlir b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
index 3dfd1dc84bab1..2f70e2fbfbf2e 100644
--- a/mlir/test/Target/LLVMIR/openmp-reduction.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
@@ -46,15 +46,15 @@ llvm.func @simple_reduction(%lb : i64, %ub : i64, %step : i64) {
 
 // Private reduction variable and its initialization.
 // CHECK: %[[PRIVATE:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE]]
 
 // Call to the reduction function.
 // CHECK: call i32 @__kmpc_reduce
 // CHECK-SAME: @[[REDFUNC:[A-Za-z_.][A-Za-z0-9_.]*]]
 
 // Atomic reduction.
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL]]
 
 // Non-atomic reduction:
 // CHECK: fadd float
@@ -66,9 +66,9 @@ llvm.func @simple_reduction(%lb : i64, %ub : i64, %step : i64) {
 
 // Update of the private variable using the reduction region
 // (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
 // CHECK: %[[UPDATED:.+]] = fadd float %[[PARTIAL]], 2.000000e+00
-// CHECK: store float %[[UPDATED]], float* %[[PRIVATE]]
+// CHECK: store float %[[UPDATED]], ptr %[[PRIVATE]]
 
 // Reduction function.
 // CHECK: define internal void @[[REDFUNC]]
@@ -125,18 +125,18 @@ llvm.func @reuse_declaration(%lb : i64, %ub : i64, %step : i64) {
 // Private reduction variable and its initialization.
 // CHECK: %[[PRIVATE1:.+]] = alloca float
 // CHECK: %[[PRIVATE2:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE1]]
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE2]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE1]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE2]]
 
 // Call to the reduction function.
 // CHECK: call i32 @__kmpc_reduce
 // CHECK-SAME: @[[REDFUNC:[A-Za-z_.][A-Za-z0-9_.]*]]
 
 // Atomic reduction.
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL1]]
-// CHECK: %[[PARTIAL2:.+]] = load float, float* %[[PRIVATE2]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL2]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL1]]
+// CHECK: %[[PARTIAL2:.+]] = load float, ptr %[[PRIVATE2]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL2]]
 
 // Non-atomic reduction:
 // CHECK: fadd float
@@ -149,12 +149,12 @@ llvm.func @reuse_declaration(%lb : i64, %ub : i64, %step : i64) {
 
 // Update of the private variable using the reduction region
 // (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
 // CHECK: %[[UPDATED1:.+]] = fadd float %[[PARTIAL1]], 2.000000e+00
-// CHECK: store float %[[UPDATED1]], float* %[[PRIVATE1]]
-// CHECK: %[[PARTIAL2:.+]] = load float, float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED1]], ptr %[[PRIVATE1]]
+// CHECK: %[[PARTIAL2:.+]] = load float, ptr %[[PRIVATE2]]
 // CHECK: %[[UPDATED2:.+]] = fadd float %[[PARTIAL2]], 2.000000e+00
-// CHECK: store float %[[UPDATED2]], float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED2]], ptr %[[PRIVATE2]]
 
 // Reduction function.
 // CHECK: define internal void @[[REDFUNC]]
@@ -210,18 +210,18 @@ llvm.func @missing_omp_reduction(%lb : i64, %ub : i64, %step : i64) {
 // Private reduction variable and its initialization.
 // CHECK: %[[PRIVATE1:.+]] = alloca float
 // CHECK: %[[PRIVATE2:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE1]]
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE2]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE1]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE2]]
 
 // Call to the reduction function.
 // CHECK: call i32 @__kmpc_reduce
 // CHECK-SAME: @[[REDFUNC:[A-Za-z_.][A-Za-z0-9_.]*]]
 
 // Atomic reduction.
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL1]]
-// CHECK: %[[PARTIAL2:.+]] = load float, float* %[[PRIVATE2]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL2]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL1]]
+// CHECK: %[[PARTIAL2:.+]] = load float, ptr %[[PRIVATE2]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL2]]
 
 // Non-atomic reduction:
 // CHECK: fadd float
@@ -234,10 +234,10 @@ llvm.func @missing_omp_reduction(%lb : i64, %ub : i64, %step : i64) {
 
 // Update of the private variable using the reduction region
 // (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
 // CHECK: %[[UPDATED1:.+]] = fadd float %[[PARTIAL1]], 2.000000e+00
-// CHECK: store float %[[UPDATED1]], float* %[[PRIVATE1]]
-// CHECK-NOT: %{{.*}} = load float, float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED1]], ptr %[[PRIVATE1]]
+// CHECK-NOT: %{{.*}} = load float, ptr %[[PRIVATE2]]
 // CHECK-NOT: %{{.*}} = fadd float %[[PARTIAL2]], 2.000000e+00
 
 // Reduction function.
@@ -293,15 +293,15 @@ llvm.func @double_reference(%lb : i64, %ub : i64, %step : i64) {
 
 // Private reduction variable and its initialization.
 // CHECK: %[[PRIVATE:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE]]
 
 // Call to the reduction function.
 // CHECK: call i32 @__kmpc_reduce
 // CHECK-SAME: @[[REDFUNC:[A-Za-z_.][A-Za-z0-9_.]*]]
 
 // Atomic reduction.
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
-// CHECK: atomicrmw fadd float* %{{.*}}, float %[[PARTIAL]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
+// CHECK: atomicrmw fadd ptr %{{.*}}, float %[[PARTIAL]]
 
 // Non-atomic reduction:
 // CHECK: fadd float
@@ -313,12 +313,12 @@ llvm.func @double_reference(%lb : i64, %ub : i64, %step : i64) {
 
 // Update of the private variable using the reduction region
 // (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
 // CHECK: %[[UPDATED:.+]] = fadd float %[[PARTIAL]], 2.000000e+00
-// CHECK: store float %[[UPDATED]], float* %[[PRIVATE]]
-// CHECK: %[[PARTIAL:.+]] = load float, float* %[[PRIVATE]]
+// CHECK: store float %[[UPDATED]], ptr %[[PRIVATE]]
+// CHECK: %[[PARTIAL:.+]] = load float, ptr %[[PRIVATE]]
 // CHECK: %[[UPDATED:.+]] = fadd float %[[PARTIAL]], 2.000000e+00
-// CHECK: store float %[[UPDATED]], float* %[[PRIVATE]]
+// CHECK: store float %[[UPDATED]], ptr %[[PRIVATE]]
 
 // Reduction function.
 // CHECK: define internal void @[[REDFUNC]]
@@ -384,8 +384,8 @@ llvm.func @no_atomic(%lb : i64, %ub : i64, %step : i64) {
 // Private reduction variable and its initialization.
 // CHECK: %[[PRIVATE1:.+]] = alloca float
 // CHECK: %[[PRIVATE2:.+]] = alloca float
-// CHECK: store float 0.000000e+00, float* %[[PRIVATE1]]
-// CHECK: store float 1.000000e+00, float* %[[PRIVATE2]]
+// CHECK: store float 0.000000e+00, ptr %[[PRIVATE1]]
+// CHECK: store float 1.000000e+00, ptr %[[PRIVATE2]]
 
 // Call to the reduction function.
 // CHECK: call i32 @__kmpc_reduce
@@ -405,12 +405,12 @@ llvm.func @no_atomic(%lb : i64, %ub : i64, %step : i64) {
 
 // Update of the private variable using the reduction region
 // (the body block currently comes after all the other blocks).
-// CHECK: %[[PARTIAL1:.+]] = load float, float* %[[PRIVATE1]]
+// CHECK: %[[PARTIAL1:.+]] = load float, ptr %[[PRIVATE1]]
 // CHECK: %[[UPDATED1:.+]] = fadd float %[[PARTIAL1]], 2.000000e+00
-// CHECK: store float %[[UPDATED1]], float* %[[PRIVATE1]]
-// CHECK: %[[PARTIAL2:.+]] = load float, float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED1]], ptr %[[PRIVATE1]]
+// CHECK: %[[PARTIAL2:.+]] = load float, ptr %[[PRIVATE2]]
 // CHECK: %[[UPDATED2:.+]] = fmul float %[[PARTIAL2]], 2.000000e+00
-// CHECK: store float %[[UPDATED2]], float* %[[PRIVATE2]]
+// CHECK: store float %[[UPDATED2]], ptr %[[PRIVATE2]]
 
 // Reduction function.
 // CHECK: define internal void @[[REDFUNC]]

diff  --git a/polly/test/CodeGen/non-affine-exit-node-dominance.ll b/polly/test/CodeGen/non-affine-exit-node-dominance.ll
index 077dd2de50076..af19d2420e3e6 100644
--- a/polly/test/CodeGen/non-affine-exit-node-dominance.ll
+++ b/polly/test/CodeGen/non-affine-exit-node-dominance.ll
@@ -11,7 +11,7 @@
 ; CHECK:         %p_escaping = select i1 undef, i32 undef, i32 undef
 ;
 ; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
-; CHECK:         store i32 %p_escaping, i32* %escaping.s2a
+; CHECK:         store i32 %p_escaping, ptr %escaping.s2a
 
 define i32 @func() {
 entry:

diff  --git a/polly/test/CodeGen/non-affine-region-implicit-store.ll b/polly/test/CodeGen/non-affine-region-implicit-store.ll
index e31b872a7b818..e89197e24852c 100644
--- a/polly/test/CodeGen/non-affine-region-implicit-store.ll
+++ b/polly/test/CodeGen/non-affine-region-implicit-store.ll
@@ -10,10 +10,10 @@
 ; This checks that the stored value is indeed from the generated code.
 ;
 ; CHECK-LABEL: polly.stmt.do.body.entry:
-; CHECK:        a.phiops.reload = load i32, i32* %a.phiops
+; CHECK:        a.phiops.reload = load i32, ptr %a.phiops
 ;
 ; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
-; CHECK:         store i32 %polly.a, i32* %a.s2a
+; CHECK:         store i32 %polly.a, ptr %a.s2a
 
 define void @func() {
 entry:

diff  --git a/polly/test/CodeGen/out-of-scop-phi-node-use.ll b/polly/test/CodeGen/out-of-scop-phi-node-use.ll
index 5f08bbc98837a..54e909ecf3782 100644
--- a/polly/test/CodeGen/out-of-scop-phi-node-use.ll
+++ b/polly/test/CodeGen/out-of-scop-phi-node-use.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
 ; CHECK-NEXT:  %_s.sroa.343.0.ph5161118 = phi i32 [ undef, %for.cond ], [ %_s.sroa.343.0.ph5161118.ph.merge, %polly.merge_new_and_old ]
 
 ; CHECK-LABEL: polly.exiting:
-; CHECK-NEXT: %_s.sroa.343.0.ph5161118.ph.final_reload = load i32, i32* %_s.sroa.343.0.ph5161118.s2a
+; CHECK-NEXT: %_s.sroa.343.0.ph5161118.ph.final_reload = load i32, ptr %_s.sroa.343.0.ph5161118.s2a
 
 ; Function Attrs: nounwind uwtable
 define void @lzmaDecode() #0 {

diff  --git a/polly/test/CodeGen/synthesizable_phi_write_after_loop.ll b/polly/test/CodeGen/synthesizable_phi_write_after_loop.ll
index 9cd72a82c62bc..6a8d3b94d1cc2 100644
--- a/polly/test/CodeGen/synthesizable_phi_write_after_loop.ll
+++ b/polly/test/CodeGen/synthesizable_phi_write_after_loop.ll
@@ -11,7 +11,7 @@
 ; The first is currently generated by Polly and tested here.
 
 ; CHECK:      polly.stmt.next:
-; CHECK-NEXT:   store i32 2, i32* %phi.phiops
+; CHECK-NEXT:   store i32 2, ptr %phi.phiops
 ; CHECK-NEXT:   br label %polly.stmt.join
 
 define i32 @func() {

diff  --git a/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll b/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll
index 8bd073daf9411..394173bdc986a 100644
--- a/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll
+++ b/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll
@@ -10,17 +10,17 @@
 ; CHECK:         %newval.merge = phi float [ %newval.final_reload, %polly.exiting ], [ %newval, %subregion_exit.region_exiting ]
 ;
 ; CHECK-LABEL: polly.start:
-; CHECK:         store float %loop_carried.ph, float* %loop_carried.phiops
+; CHECK:         store float %loop_carried.ph, ptr %loop_carried.phiops
 ;
 ; CHECK-LABEL: polly.stmt.subregion_entry.entry:
-; CHECK:         %loop_carried.phiops.reload = load float, float* %loop_carried.phiops
+; CHECK:         %loop_carried.phiops.reload = load float, ptr %loop_carried.phiops
 ;
 ; CHECK-LABEL: polly.stmt.subregion_entry:
 ; CHECK:         %polly.loop_carried = phi float [ %loop_carried.phiops.reload, %polly.stmt.subregion_entry.entry ]
 ; CHECK:         %p_newval = fadd float %polly.loop_carried, 1.000000e+00
 ;
 ; CHECK-LABEL: polly.stmt.polly.merge_new_and_old.exit:
-; CHECK:         %newval.final_reload = load float, float* %newval.s2a
+; CHECK:         %newval.final_reload = load float, ptr %newval.s2a
 
 define void @func() {
 entry:


        


More information about the llvm-commits mailing list