[llvm] 1aee1e1 - [Analysis] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 5 03:04:47 PST 2024


Author: Nikita Popov
Date: 2024-02-05T12:04:39+01:00
New Revision: 1aee1e1f4c4b504becc06521546de992a662694b

URL: https://github.com/llvm/llvm-project/commit/1aee1e1f4c4b504becc06521546de992a662694b
DIFF: https://github.com/llvm/llvm-project/commit/1aee1e1f4c4b504becc06521546de992a662694b.diff

LOG: [Analysis] Convert tests to opaque pointers (NFC)

Added: 
    

Modified: 
    llvm/test/Analysis/BasicAA/assume-index-positive.ll
    llvm/test/Analysis/BasicAA/index-size.ll
    llvm/test/Analysis/BasicAA/noalias-bugs.ll
    llvm/test/Analysis/BasicAA/vscale.ll
    llvm/test/Analysis/BlockFrequencyInfo/basic.ll
    llvm/test/Analysis/BlockFrequencyInfo/irreducible_loop_crash.ll
    llvm/test/Analysis/BlockFrequencyInfo/irreducible_pgo.ll
    llvm/test/Analysis/BlockFrequencyInfo/loop_with_invoke.ll
    llvm/test/Analysis/BlockFrequencyInfo/loops_with_profile_info.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-3.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-5.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-6.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-7.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-8.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-2.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-3.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-5.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-7.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-8.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-5.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-6.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-7.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-8.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-3.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-5.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-6.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-7.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-8.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-2.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-3.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-5.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-7.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-8.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-2.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-3.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-4.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-5.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-6.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-7.ll
    llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-8.ll
    llvm/test/Analysis/Dominators/2007-01-14-BreakCritEdges.ll
    llvm/test/Analysis/Dominators/2007-07-12-SplitBlock.ll
    llvm/test/Analysis/Dominators/invoke.ll
    llvm/test/Analysis/FunctionPropertiesAnalysis/matmul.ll
    llvm/test/Analysis/IVUsers/deep_recursion_in_scev.ll
    llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll
    llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
    llvm/test/Analysis/LoopAccessAnalysis/forked-pointers.ll
    llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll
    llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll
    llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost-m32.ll
    llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll
    llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll
    llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll
    llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll
    llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll
    llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll
    llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll
    llvm/test/Analysis/LoopInfo/annotated-parallel-complex.ll
    llvm/test/Analysis/LoopInfo/annotated-parallel-simple.ll
    llvm/test/Analysis/LoopNestAnalysis/duplicate-successors.ll
    llvm/test/Analysis/LoopNestAnalysis/imperfectnest.ll
    llvm/test/Analysis/LoopNestAnalysis/infinite.ll
    llvm/test/Analysis/LoopNestAnalysis/perfectnest.ll
    llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/always-uniform-gmir.mir
    llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir
    llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Analysis/BasicAA/assume-index-positive.ll b/llvm/test/Analysis/BasicAA/assume-index-positive.ll
index 42014dfe02642..3402886a06f00 100644
--- a/llvm/test/Analysis/BasicAA/assume-index-positive.ll
+++ b/llvm/test/Analysis/BasicAA/assume-index-positive.ll
@@ -105,9 +105,9 @@ define void @shl_of_non_negative(ptr %ptr, i64 %a) {
   %ptr.a = getelementptr i8, ptr %ptr, i64 %a
   %shl = shl i64 %a, 1
   %ptr.shl = getelementptr i8, ptr %ptr, i64 %shl
-  load i8, i8* %ptr.a
-  load i8, i8* %ptr.neg
-  load i8, i8* %ptr.shl
+  load i8, ptr %ptr.a
+  load i8, ptr %ptr.neg
+  load i8, ptr %ptr.shl
   ret void
 }
 

diff  --git a/llvm/test/Analysis/BasicAA/index-size.ll b/llvm/test/Analysis/BasicAA/index-size.ll
index 61d85baf00213..4286c8d9d0aac 100644
--- a/llvm/test/Analysis/BasicAA/index-size.ll
+++ b/llvm/test/Analysis/BasicAA/index-size.ll
@@ -13,7 +13,7 @@ define void @mustalias_due_to_index_size(ptr %ptr) {
   load i8, ptr %ptr
   %gep.1 = getelementptr i8, ptr %ptr, i64 4294967296
   store i8 0, ptr %gep.1
-  %gep.2 = getelementptr i8, i8* %ptr, i64 0
+  %gep.2 = getelementptr i8, ptr %ptr, i64 0
   store i8 1, ptr %gep.2
   ret void
 }

diff  --git a/llvm/test/Analysis/BasicAA/noalias-bugs.ll b/llvm/test/Analysis/BasicAA/noalias-bugs.ll
index 1361cf7d18286..4bed4e2cf45c5 100644
--- a/llvm/test/Analysis/BasicAA/noalias-bugs.ll
+++ b/llvm/test/Analysis/BasicAA/noalias-bugs.ll
@@ -10,14 +10,14 @@ target triple = "x86_64-unknown-linux-gnu"
 %nested = type { %nested.i64 }
 %nested.i64 = type { i64 }
 
-define i64 @testcase(%nested * noalias %p1, %nested * noalias %p2,
+define i64 @testcase(ptr noalias %p1, ptr noalias %p2,
                      i32 %a, i32 %b) {
-  %ptr = getelementptr inbounds %nested, %nested* %p1, i64 -1, i32 0
-  %ptr.64 = getelementptr inbounds %nested.i64, %nested.i64* %ptr, i64 0, i32 0
-  %ptr2= getelementptr inbounds %nested, %nested* %p2, i64 0, i32 0
+  %ptr = getelementptr inbounds %nested, ptr %p1, i64 -1, i32 0
+  %ptr.64 = getelementptr inbounds %nested.i64, ptr %ptr, i64 0, i32 0
+  %ptr2= getelementptr inbounds %nested, ptr %p2, i64 0, i32 0
   %cmp = icmp ult i32 %a, %b
-  %either_ptr = select i1 %cmp, %nested.i64* %ptr2, %nested.i64* %ptr
-  %either_ptr.64 = getelementptr inbounds %nested.i64, %nested.i64* %either_ptr, i64 0, i32 0
+  %either_ptr = select i1 %cmp, ptr %ptr2, ptr %ptr
+  %either_ptr.64 = getelementptr inbounds %nested.i64, ptr %either_ptr, i64 0, i32 0
 
 ; Because either_ptr.64 and ptr.64 can alias (we used to return noalias)
 ; elimination of the first store is not valid.
@@ -26,8 +26,8 @@ define i64 @testcase(%nested * noalias %p1, %nested * noalias %p2,
 ; CHECK: load
 ; CHECK: store i64 1
 
-  store i64 2, i64* %ptr.64, align 8
-  %r = load i64, i64* %either_ptr.64, align 8
-  store i64 1, i64* %ptr.64, align 8
+  store i64 2, ptr %ptr.64, align 8
+  %r = load i64, ptr %either_ptr.64, align 8
+  store i64 1, ptr %ptr.64, align 8
   ret i64 %r
 }

diff  --git a/llvm/test/Analysis/BasicAA/vscale.ll b/llvm/test/Analysis/BasicAA/vscale.ll
index 5c3c185af9141..7b8fc035f1ecb 100644
--- a/llvm/test/Analysis/BasicAA/vscale.ll
+++ b/llvm/test/Analysis/BasicAA/vscale.ll
@@ -51,8 +51,8 @@ define void @gep_alloca_const_offset_3() {
 ; CHECK-DAG:  MustAlias:    <vscale x 4 x i32>* %gep1, i32* %gep2
 define void @gep_alloca_const_offset_4() {
   %alloc = alloca <vscale x 4 x i32>
-  %gep1 = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %alloc, i64 0
-  %gep2 = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %alloc, i64 0, i64 0
+  %gep1 = getelementptr <vscale x 4 x i32>, ptr %alloc, i64 0
+  %gep2 = getelementptr <vscale x 4 x i32>, ptr %alloc, i64 0, i64 0
   load <vscale x 4 x i32>, ptr %alloc
   load <vscale x 4 x i32>, ptr %gep1
   load i32, ptr %gep2

diff  --git a/llvm/test/Analysis/BlockFrequencyInfo/basic.ll b/llvm/test/Analysis/BlockFrequencyInfo/basic.ll
index 09ab58aec43f1..73e8036d18067 100644
--- a/llvm/test/Analysis/BlockFrequencyInfo/basic.ll
+++ b/llvm/test/Analysis/BlockFrequencyInfo/basic.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -passes='print<block-freq>' -disable-output 2>&1 | FileCheck %s
 
-define i32 @test1(i32 %i, i32* %a) {
+define i32 @test1(i32 %i, ptr %a) {
 ; CHECK-LABEL: Printing analysis {{.*}} for function 'test1':
 ; CHECK-NEXT: block-frequency-info: test1
 ; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
@@ -12,8 +12,8 @@ entry:
 body:
   %iv = phi i32 [ 0, %entry ], [ %next, %body ]
   %base = phi i32 [ 0, %entry ], [ %sum, %body ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
-  %0 = load i32, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %iv
+  %0 = load i32, ptr %arrayidx
   %sum = add nsw i32 %0, %base
   %next = add i32 %iv, 1
   %exitcond = icmp eq i32 %next, %i

diff  --git a/llvm/test/Analysis/BlockFrequencyInfo/irreducible_loop_crash.ll b/llvm/test/Analysis/BlockFrequencyInfo/irreducible_loop_crash.ll
index 4d1676fe8fd11..b2aa0648c0a92 100644
--- a/llvm/test/Analysis/BlockFrequencyInfo/irreducible_loop_crash.ll
+++ b/llvm/test/Analysis/BlockFrequencyInfo/irreducible_loop_crash.ll
@@ -3,13 +3,13 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define void @fn1(i32* %f) {
+define void @fn1(ptr %f) {
 entry:
   %tobool7 = icmp eq i32 undef, 0
   br i1 undef, label %if.end.12, label %for.body.5
 
 for.inc:
-  store i32 undef, i32* %f, align 4
+  store i32 undef, ptr %f, align 4
   br label %for.body.5
 
 for.body.5:                                       ; preds = %for.cond.4.preheader

diff  --git a/llvm/test/Analysis/BlockFrequencyInfo/irreducible_pgo.ll b/llvm/test/Analysis/BlockFrequencyInfo/irreducible_pgo.ll
index 9a80f7111f51e..dabd2fb30ab21 100644
--- a/llvm/test/Analysis/BlockFrequencyInfo/irreducible_pgo.ll
+++ b/llvm/test/Analysis/BlockFrequencyInfo/irreducible_pgo.ll
@@ -72,13 +72,13 @@ for.end:                                          ; preds = %for.cond2
 ; CHECK-NEXT: - if.end9: {{.*}} count = 1000, irr_loop_header_weight = 1000
 ; CHECK-NEXT: - for.end: {{.*}} count = 100
 
- at targets = local_unnamed_addr global [256 x i8*] zeroinitializer, align 16
+ at targets = local_unnamed_addr global [256 x ptr] zeroinitializer, align 16
 @tracing = local_unnamed_addr global i32 0, align 4
 
 ; Function Attrs: noinline norecurse nounwind uwtable
-define i32 @_Z11irreduciblePh(i8* nocapture readonly %p) !prof !27 {
+define i32 @_Z11irreduciblePh(ptr nocapture readonly %p) !prof !27 {
 entry:
-  %0 = load i32, i32* @tracing, align 4
+  %0 = load i32, ptr @tracing, align 4
   %1 = trunc i32 %0 to i8
   %tobool = icmp eq i32 %0, 0
   br label %for.cond1
@@ -128,9 +128,9 @@ exit:                                             ; preds = %sw.bb15, %sw.bb
 
 indirectgoto:                                     ; preds = %if.then18, %if.then
   %idxprom21 = zext i32 %0 to i64
-  %arrayidx22 = getelementptr inbounds [256 x i8*], [256 x i8*]* @targets, i64 0, i64 %idxprom21
-  %target = load i8*, i8** %arrayidx22, align 8
-  indirectbr i8* %target, [label %unknown_op, label %sw.bb, label %TARGET_1, label %TARGET_2], !prof !41, !irr_loop !42
+  %arrayidx22 = getelementptr inbounds [256 x ptr], ptr @targets, i64 0, i64 %idxprom21
+  %target = load ptr, ptr %arrayidx22, align 8
+  indirectbr ptr %target, [label %unknown_op, label %sw.bb, label %TARGET_1, label %TARGET_2], !prof !41, !irr_loop !42
 }
 
 !36 = !{!"branch_weights", i32 0, i32 0, i32 201, i32 1}
@@ -161,9 +161,9 @@ indirectgoto:                                     ; preds = %if.then18, %if.then
 
 ; Missing some irr loop annotations.
 ; Function Attrs: noinline norecurse nounwind uwtable
-define i32 @_Z11irreduciblePh2(i8* nocapture readonly %p) !prof !27 {
+define i32 @_Z11irreduciblePh2(ptr nocapture readonly %p) !prof !27 {
 entry:
-  %0 = load i32, i32* @tracing, align 4
+  %0 = load i32, ptr @tracing, align 4
   %1 = trunc i32 %0 to i8
   %tobool = icmp eq i32 %0, 0
   br label %for.cond1
@@ -213,9 +213,9 @@ exit:                                             ; preds = %sw.bb15, %sw.bb
 
 indirectgoto:                                     ; preds = %if.then18, %if.then
   %idxprom21 = zext i32 %0 to i64
-  %arrayidx22 = getelementptr inbounds [256 x i8*], [256 x i8*]* @targets, i64 0, i64 %idxprom21
-  %target = load i8*, i8** %arrayidx22, align 8
-  indirectbr i8* %target, [label %unknown_op, label %sw.bb, label %TARGET_1, label %TARGET_2], !prof !41, !irr_loop !42
+  %arrayidx22 = getelementptr inbounds [256 x ptr], ptr @targets, i64 0, i64 %idxprom21
+  %target = load ptr, ptr %arrayidx22, align 8
+  indirectbr ptr %target, [label %unknown_op, label %sw.bb, label %TARGET_1, label %TARGET_2], !prof !41, !irr_loop !42
 }
 
 ; CHECK-LABEL: Printing analysis {{.*}} for function '_Z11irreduciblePh2':

diff  --git a/llvm/test/Analysis/BlockFrequencyInfo/loop_with_invoke.ll b/llvm/test/Analysis/BlockFrequencyInfo/loop_with_invoke.ll
index 3d9414ffbe1ea..5137860006dd0 100644
--- a/llvm/test/Analysis/BlockFrequencyInfo/loop_with_invoke.ll
+++ b/llvm/test/Analysis/BlockFrequencyInfo/loop_with_invoke.ll
@@ -20,7 +20,7 @@ invoke.cont:
 
 ; CHECK-NEXT: lpad: float = 0.0094467
 lpad:
-  %ll = landingpad { i8*, i32 }
+  %ll = landingpad { ptr, i32 }
           cleanup
   br label %exit
 

diff  --git a/llvm/test/Analysis/BlockFrequencyInfo/loops_with_profile_info.ll b/llvm/test/Analysis/BlockFrequencyInfo/loops_with_profile_info.ll
index 7cebfb114f4ed..35cc1d0111ee3 100644
--- a/llvm/test/Analysis/BlockFrequencyInfo/loops_with_profile_info.ll
+++ b/llvm/test/Analysis/BlockFrequencyInfo/loops_with_profile_info.ll
@@ -55,7 +55,7 @@
 @.str = private unnamed_addr constant [8 x i8] c"g = %d\0A\00", align 1
 
 declare void @bar()
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
 ; CHECK: Printing analysis {{.*}} for function 'main':
 ; CHECK-NEXT: block-frequency-info: main
@@ -65,31 +65,31 @@ entry:
   %i = alloca i32, align 4
   %j = alloca i32, align 4
   %k = alloca i32, align 4
-  store i32 0, i32* %retval
-  store i32 0, i32* @g, align 4
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %retval
+  store i32 0, ptr @g, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc10, %entry
-  %0 = load i32, i32* %i, align 4
+  %0 = load i32, ptr %i, align 4
   %cmp = icmp slt i32 %0, 100
   br i1 %cmp, label %for.body, label %for.end12, !prof !1
 
 for.body:                                         ; preds = %for.cond
-  store i32 0, i32* %j, align 4
+  store i32 0, ptr %j, align 4
   br label %for.cond1
 
 for.cond1:                                        ; preds = %for.inc7, %for.body
-  %1 = load i32, i32* %j, align 4
+  %1 = load i32, ptr %j, align 4
   %cmp2 = icmp slt i32 %1, 100
   br i1 %cmp2, label %for.body3, label %for.end9, !prof !2
 
 for.body3:                                        ; preds = %for.cond1
-  store i32 0, i32* %k, align 4
+  store i32 0, ptr %k, align 4
   br label %for.cond4
 
 for.cond4:                                        ; preds = %for.inc, %for.body3
-  %2 = load i32, i32* %k, align 4
+  %2 = load i32, ptr %k, align 4
   %cmp5 = icmp slt i32 %2, 100
   br i1 %cmp5, label %for.body6, label %for.end, !prof !3
 
@@ -99,47 +99,47 @@ for.body6:                                        ; preds = %for.cond4
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body6
-  %3 = load i32, i32* %k, align 4
+  %3 = load i32, ptr %k, align 4
   %inc = add nsw i32 %3, 1
-  store i32 %inc, i32* %k, align 4
+  store i32 %inc, ptr %k, align 4
   br label %for.cond4
 
 for.end:                                          ; preds = %for.cond4
   br label %for.inc7
 
 for.inc7:                                         ; preds = %for.end
-  %4 = load i32, i32* %j, align 4
+  %4 = load i32, ptr %j, align 4
   %inc8 = add nsw i32 %4, 1
-  store i32 %inc8, i32* %j, align 4
+  store i32 %inc8, ptr %j, align 4
   br label %for.cond1
 
 for.end9:                                         ; preds = %for.cond1
   br label %for.inc10
 
 for.inc10:                                        ; preds = %for.end9
-  %5 = load i32, i32* %i, align 4
+  %5 = load i32, ptr %i, align 4
   %inc11 = add nsw i32 %5, 1
-  store i32 %inc11, i32* %i, align 4
+  store i32 %inc11, ptr %i, align 4
   br label %for.cond
 
 for.end12:                                        ; preds = %for.cond
-  %6 = load i32, i32* @g, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %6)
-  store i32 0, i32* @g, align 4
-  store i32 0, i32* %i, align 4
+  %6 = load i32, ptr @g, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %6)
+  store i32 0, ptr @g, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond13
 
 for.cond13:                                       ; preds = %for.inc22, %for.end12
-  %7 = load i32, i32* %i, align 4
+  %7 = load i32, ptr %i, align 4
   %cmp14 = icmp slt i32 %7, 100
   br i1 %cmp14, label %for.body15, label %for.end24, !prof !1
 
 for.body15:                                       ; preds = %for.cond13
-  store i32 0, i32* %j, align 4
+  store i32 0, ptr %j, align 4
   br label %for.cond16
 
 for.cond16:                                       ; preds = %for.inc19, %for.body15
-  %8 = load i32, i32* %j, align 4
+  %8 = load i32, ptr %j, align 4
   %cmp17 = icmp slt i32 %8, 10000
   br i1 %cmp17, label %for.body18, label %for.end21, !prof !4
 
@@ -149,29 +149,29 @@ for.body18:                                       ; preds = %for.cond16
   br label %for.inc19
 
 for.inc19:                                        ; preds = %for.body18
-  %9 = load i32, i32* %j, align 4
+  %9 = load i32, ptr %j, align 4
   %inc20 = add nsw i32 %9, 1
-  store i32 %inc20, i32* %j, align 4
+  store i32 %inc20, ptr %j, align 4
   br label %for.cond16
 
 for.end21:                                        ; preds = %for.cond16
   br label %for.inc22
 
 for.inc22:                                        ; preds = %for.end21
-  %10 = load i32, i32* %i, align 4
+  %10 = load i32, ptr %i, align 4
   %inc23 = add nsw i32 %10, 1
-  store i32 %inc23, i32* %i, align 4
+  store i32 %inc23, ptr %i, align 4
   br label %for.cond13
 
 for.end24:                                        ; preds = %for.cond13
-  %11 = load i32, i32* @g, align 4
-  %call25 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %11)
-  store i32 0, i32* @g, align 4
-  store i32 0, i32* %i, align 4
+  %11 = load i32, ptr @g, align 4
+  %call25 = call i32 (ptr, ...) @printf(ptr @.str, i32 %11)
+  store i32 0, ptr @g, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond26
 
 for.cond26:                                       ; preds = %for.inc29, %for.end24
-  %12 = load i32, i32* %i, align 4
+  %12 = load i32, ptr %i, align 4
   %cmp27 = icmp slt i32 %12, 1000000
   br i1 %cmp27, label %for.body28, label %for.end31, !prof !5
 
@@ -181,16 +181,16 @@ for.body28:                                       ; preds = %for.cond26
   br label %for.inc29
 
 for.inc29:                                        ; preds = %for.body28
-  %13 = load i32, i32* %i, align 4
+  %13 = load i32, ptr %i, align 4
   %inc30 = add nsw i32 %13, 1
-  store i32 %inc30, i32* %i, align 4
+  store i32 %inc30, ptr %i, align 4
   br label %for.cond26
 
 for.end31:                                        ; preds = %for.cond26
-  %14 = load i32, i32* @g, align 4
-  %call32 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %14)
-  store i32 0, i32* @g, align 4
-  %15 = load i32, i32* %retval
+  %14 = load i32, ptr @g, align 4
+  %call32 = call i32 (ptr, ...) @printf(ptr @.str, i32 %14)
+  store i32 0, ptr @g, align 4
+  %15 = load i32, ptr %retval
   ret i32 %15
 }
 

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll
index 96d5da9392afa..1eec906171edd 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll
@@ -64,7 +64,7 @@ for.body:
   %reduce.add.0.narrow = fptoui float %reduce.add.0 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.0.narrow, i8* %out
+  store i8 %reduce.add.0.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 2
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-3.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-3.ll
index 688ed7b09fa9d..d119ca93f04de 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-3.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-3.ll
@@ -68,7 +68,7 @@ for.body:
   %reduce.add.1.narrow = fptoui float %reduce.add.1 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.1.narrow, i8* %out
+  store i8 %reduce.add.1.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 3
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-4.ll
index df5ce73d53133..c5c6555de0da5 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-4.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-4.ll
@@ -71,7 +71,7 @@ for.body:
   %reduce.add.2.narrow = fptoui float %reduce.add.2 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.2.narrow, i8* %out
+  store i8 %reduce.add.2.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 4
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-5.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-5.ll
index 45dbf592c4a57..de178cdf19308 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-5.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-5.ll
@@ -72,7 +72,7 @@ for.body:
   %reduce.add.3.narrow = fptoui float %reduce.add.3 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.3.narrow, i8* %out
+  store i8 %reduce.add.3.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 5
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-6.ll
index 33fbee365cac8..155f42f8f23c8 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-6.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-6.ll
@@ -76,7 +76,7 @@ for.body:
   %reduce.add.4.narrow = fptoui float %reduce.add.4 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.4.narrow, i8* %out
+  store i8 %reduce.add.4.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 6
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-7.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-7.ll
index e744992ae0a50..1f54b7485aa8f 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-7.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-7.ll
@@ -80,7 +80,7 @@ for.body:
   %reduce.add.5.narrow = fptoui float %reduce.add.5 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.5.narrow, i8* %out
+  store i8 %reduce.add.5.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 7
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-8.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-8.ll
index ee46d2594eb85..d53dca05155b7 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-8.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-8.ll
@@ -83,7 +83,7 @@ for.body:
   %reduce.add.6.narrow = fptoui float %reduce.add.6 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.6.narrow, i8* %out
+  store i8 %reduce.add.6.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 8
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-2.ll
index b61308c5915e7..1575f92465d52 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-2.ll
@@ -64,7 +64,7 @@ for.body:
   %reduce.add.0.narrow = fptoui double %reduce.add.0 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.0.narrow, i8* %out
+  store i8 %reduce.add.0.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 2
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-3.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-3.ll
index 7786daeac3dd1..89175a65990f6 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-3.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-3.ll
@@ -65,7 +65,7 @@ for.body:
   %reduce.add.1.narrow = fptoui double %reduce.add.1 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.1.narrow, i8* %out
+  store i8 %reduce.add.1.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 3
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll
index 1bd83144c0d2f..8db9fd364133e 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-4.ll
@@ -68,7 +68,7 @@ for.body:
   %reduce.add.2.narrow = fptoui double %reduce.add.2 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.2.narrow, i8* %out
+  store i8 %reduce.add.2.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 4
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-5.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-5.ll
index 2186d50d7af46..25c49e3b8a811 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-5.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-5.ll
@@ -69,7 +69,7 @@ for.body:
   %reduce.add.3.narrow = fptoui double %reduce.add.3 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.3.narrow, i8* %out
+  store i8 %reduce.add.3.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 5
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll
index ceb7ccc458833..42c980b6d3985 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll
@@ -73,7 +73,7 @@ for.body:
   %reduce.add.4.narrow = fptoui double %reduce.add.4 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.4.narrow, i8* %out
+  store i8 %reduce.add.4.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 6
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-7.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-7.ll
index ff937921c775c..68afa6d17f02f 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-7.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-7.ll
@@ -77,7 +77,7 @@ for.body:
   %reduce.add.5.narrow = fptoui double %reduce.add.5 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.5.narrow, i8* %out
+  store i8 %reduce.add.5.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 7
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-8.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-8.ll
index 9712d855e56d8..7894912c88fab 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-8.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-8.ll
@@ -80,7 +80,7 @@ for.body:
   %reduce.add.6.narrow = fptoui double %reduce.add.6 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.6.narrow, i8* %out
+  store i8 %reduce.add.6.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 8
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll
index 417728cdf1bb2..78389fadd5226 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-2.ll
@@ -74,7 +74,7 @@ for.body:
   %reduce.add.0.narrow = trunc i16 %reduce.add.0 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.0.narrow, i8* %out
+  store i8 %reduce.add.0.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 2
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll
index a62a34a6cb38f..edf044dd092e7 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-3.ll
@@ -78,7 +78,7 @@ for.body:
   %reduce.add.1.narrow = trunc i16 %reduce.add.1 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.1.narrow, i8* %out
+  store i8 %reduce.add.1.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 3
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll
index d261da3536b4e..6f22ec2a86080 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-4.ll
@@ -82,7 +82,7 @@ for.body:
   %reduce.add.2.narrow = trunc i16 %reduce.add.2 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.2.narrow, i8* %out
+  store i8 %reduce.add.2.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 4
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-5.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-5.ll
index 6a508e0125db0..d8eaa0aad61d5 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-5.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-5.ll
@@ -86,7 +86,7 @@ for.body:
   %reduce.add.3.narrow = trunc i16 %reduce.add.3 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.3.narrow, i8* %out
+  store i8 %reduce.add.3.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 5
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-6.ll
index b1f35cf290980..09823572118ad 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-6.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-6.ll
@@ -90,7 +90,7 @@ for.body:
   %reduce.add.4.narrow = trunc i16 %reduce.add.4 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.4.narrow, i8* %out
+  store i8 %reduce.add.4.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 6
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-7.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-7.ll
index b9ac320e0f010..9c0d102a70d1e 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-7.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-7.ll
@@ -94,7 +94,7 @@ for.body:
   %reduce.add.5.narrow = trunc i16 %reduce.add.5 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.5.narrow, i8* %out
+  store i8 %reduce.add.5.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 7
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-8.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-8.ll
index f7b42cce79546..7654185635d3e 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-8.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i16-stride-8.ll
@@ -98,7 +98,7 @@ for.body:
   %reduce.add.6.narrow = trunc i16 %reduce.add.6 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.6.narrow, i8* %out
+  store i8 %reduce.add.6.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 8
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll
index 62e6362a152a3..a0f4334597103 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll
@@ -64,7 +64,7 @@ for.body:
   %reduce.add.0.narrow = trunc i32 %reduce.add.0 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.0.narrow, i8* %out
+  store i8 %reduce.add.0.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 2
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-3.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-3.ll
index 1e9d4a081d2d1..239c9e1e1434e 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-3.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-3.ll
@@ -68,7 +68,7 @@ for.body:
   %reduce.add.1.narrow = trunc i32 %reduce.add.1 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.1.narrow, i8* %out
+  store i8 %reduce.add.1.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 3
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-4.ll
index 8890abd636aaf..1f59aab2860f8 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-4.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-4.ll
@@ -71,7 +71,7 @@ for.body:
   %reduce.add.2.narrow = trunc i32 %reduce.add.2 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.2.narrow, i8* %out
+  store i8 %reduce.add.2.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 4
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-5.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-5.ll
index 246a5ee48518a..63901617bb9dd 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-5.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-5.ll
@@ -72,7 +72,7 @@ for.body:
   %reduce.add.3.narrow = trunc i32 %reduce.add.3 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.3.narrow, i8* %out
+  store i8 %reduce.add.3.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 5
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-6.ll
index b7467680b87fe..278e4a80073a6 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-6.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-6.ll
@@ -76,7 +76,7 @@ for.body:
   %reduce.add.4.narrow = trunc i32 %reduce.add.4 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.4.narrow, i8* %out
+  store i8 %reduce.add.4.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 6
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-7.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-7.ll
index 26a8b2e50bd96..1eabac4e0b9c3 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-7.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-7.ll
@@ -80,7 +80,7 @@ for.body:
   %reduce.add.5.narrow = trunc i32 %reduce.add.5 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.5.narrow, i8* %out
+  store i8 %reduce.add.5.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 7
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-8.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-8.ll
index 2bf20a8141bdf..a1bb2efd73963 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-8.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-8.ll
@@ -83,7 +83,7 @@ for.body:
   %reduce.add.6.narrow = trunc i32 %reduce.add.6 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.6.narrow, i8* %out
+  store i8 %reduce.add.6.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 8
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-2.ll
index b8368a99cbb2c..bd230166ebe78 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-2.ll
@@ -64,7 +64,7 @@ for.body:
   %reduce.add.0.narrow = trunc i64 %reduce.add.0 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.0.narrow, i8* %out
+  store i8 %reduce.add.0.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 2
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-3.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-3.ll
index 529afe126d2d3..e03d3c2f8b3a4 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-3.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-3.ll
@@ -65,7 +65,7 @@ for.body:
   %reduce.add.1.narrow = trunc i64 %reduce.add.1 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.1.narrow, i8* %out
+  store i8 %reduce.add.1.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 3
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll
index 233785277a75c..f7249666918dd 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-4.ll
@@ -68,7 +68,7 @@ for.body:
   %reduce.add.2.narrow = trunc i64 %reduce.add.2 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.2.narrow, i8* %out
+  store i8 %reduce.add.2.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 4
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-5.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-5.ll
index 57545d6106bc9..96946bd58dea1 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-5.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-5.ll
@@ -69,7 +69,7 @@ for.body:
   %reduce.add.3.narrow = trunc i64 %reduce.add.3 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.3.narrow, i8* %out
+  store i8 %reduce.add.3.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 5
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll
index 2a3d2fbf9e0f8..2355c6e8b57a1 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll
@@ -73,7 +73,7 @@ for.body:
   %reduce.add.4.narrow = trunc i64 %reduce.add.4 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.4.narrow, i8* %out
+  store i8 %reduce.add.4.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 6
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-7.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-7.ll
index 4cf6c6b5e011c..646003a41dcf5 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-7.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-7.ll
@@ -77,7 +77,7 @@ for.body:
   %reduce.add.5.narrow = trunc i64 %reduce.add.5 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.5.narrow, i8* %out
+  store i8 %reduce.add.5.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 7
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-8.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-8.ll
index d3de76664513e..568ab74068f94 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-8.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-8.ll
@@ -80,7 +80,7 @@ for.body:
   %reduce.add.6.narrow = trunc i64 %reduce.add.6 to i8
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.6.narrow, i8* %out
+  store i8 %reduce.add.6.narrow, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 8
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-2.ll
index 0ed38e9254ec4..91376c26a7a4c 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-2.ll
@@ -72,7 +72,7 @@ for.body:
   %reduce.add.0 = add i8 %v0, %v1
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.0, i8* %out
+  store i8 %reduce.add.0, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 2
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-3.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-3.ll
index 6f8c3056b7aa7..4a2de69f43d43 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-3.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-3.ll
@@ -76,7 +76,7 @@ for.body:
   %reduce.add.1 = add i8 %reduce.add.0, %v2
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.1, i8* %out
+  store i8 %reduce.add.1, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 3
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-4.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-4.ll
index f1c15d8d64be6..7bb14702d5b92 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-4.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-4.ll
@@ -80,7 +80,7 @@ for.body:
   %reduce.add.2 = add i8 %reduce.add.1, %v3
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.2, i8* %out
+  store i8 %reduce.add.2, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 4
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-5.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-5.ll
index 53df030a9ba7a..6c1dd916311ab 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-5.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-5.ll
@@ -84,7 +84,7 @@ for.body:
   %reduce.add.3 = add i8 %reduce.add.2, %v4
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.3, i8* %out
+  store i8 %reduce.add.3, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 5
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-6.ll
index 23c97d502acc0..1ff3bc57a50d9 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-6.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-6.ll
@@ -88,7 +88,7 @@ for.body:
   %reduce.add.4 = add i8 %reduce.add.3, %v5
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.4, i8* %out
+  store i8 %reduce.add.4, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 6
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-7.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-7.ll
index 15f1e021eb32e..d77bca6b7aa5a 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-7.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-7.ll
@@ -92,7 +92,7 @@ for.body:
   %reduce.add.5 = add i8 %reduce.add.4, %v6
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.5, i8* %out
+  store i8 %reduce.add.5, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 7
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-8.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-8.ll
index f0062eea1cfd0..00ad2f68814b8 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-8.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i8-stride-8.ll
@@ -96,7 +96,7 @@ for.body:
   %reduce.add.6 = add i8 %reduce.add.5, %v7
 
   %out = getelementptr inbounds [1024 x i8], ptr @B, i64 0, i64 %iv.0
-  store i8 %reduce.add.6, i8* %out
+  store i8 %reduce.add.6, ptr %out
 
   %iv.next = add nuw nsw i64 %iv.0, 8
   %cmp = icmp ult i64 %iv.next, 1024

diff  --git a/llvm/test/Analysis/Dominators/2007-01-14-BreakCritEdges.ll b/llvm/test/Analysis/Dominators/2007-01-14-BreakCritEdges.ll
index 8aac32666fbcd..f11882bed2cef 100644
--- a/llvm/test/Analysis/Dominators/2007-01-14-BreakCritEdges.ll
+++ b/llvm/test/Analysis/Dominators/2007-01-14-BreakCritEdges.ll
@@ -1,16 +1,16 @@
 ; RUN: opt < %s -passes='require<domtree>,break-crit-edges' -disable-output
 ; PR1110
 
-	%struct.OggVorbis_File = type { i8*, i32, i64, i64, %struct.ogg_sync_state, i32, i64*, i64*, i32*, i64*, %struct.vorbis_info*, %struct.vorbis_comment*, i64, i32, i32, i32, double, double, %struct.ogg_stream_state, %struct.vorbis_dsp_state, %struct.vorbis_block, %struct.ov_callbacks }
-	%struct.alloc_chain = type { i8*, %struct.alloc_chain* }
-	%struct.ogg_stream_state = type { i8*, i32, i32, i32, i32*, i64*, i32, i32, i32, i32, [282 x i8], i32, i32, i32, i32, i32, i64, i64 }
-	%struct.ogg_sync_state = type { i8*, i32, i32, i32, i32, i32, i32 }
-	%struct.oggpack_buffer = type { i32, i32, i8*, i8*, i32 }
-	%struct.ov_callbacks = type { i32 (i8*, i32, i32, i8*)*, i32 (i8*, i64, i32)*, i32 (i8*)*, i32 (i8*)* }
-	%struct.vorbis_block = type { float**, %struct.oggpack_buffer, i32, i32, i32, i32, i32, i32, i64, i64, %struct.vorbis_dsp_state*, i8*, i32, i32, i32, %struct.alloc_chain*, i32, i32, i32, i32, i8* }
-	%struct.vorbis_comment = type { i8**, i32*, i32, i8* }
-	%struct.vorbis_dsp_state = type { i32, %struct.vorbis_info*, float**, float**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, i8* }
-	%struct.vorbis_info = type { i32, i32, i32, i32, i32, i32, i32, i8* }
+	%struct.OggVorbis_File = type { ptr, i32, i64, i64, %struct.ogg_sync_state, i32, ptr, ptr, ptr, ptr, ptr, ptr, i64, i32, i32, i32, double, double, %struct.ogg_stream_state, %struct.vorbis_dsp_state, %struct.vorbis_block, %struct.ov_callbacks }
+	%struct.alloc_chain = type { ptr, ptr }
+	%struct.ogg_stream_state = type { ptr, i32, i32, i32, ptr, ptr, i32, i32, i32, i32, [282 x i8], i32, i32, i32, i32, i32, i64, i64 }
+	%struct.ogg_sync_state = type { ptr, i32, i32, i32, i32, i32, i32 }
+	%struct.oggpack_buffer = type { i32, i32, ptr, ptr, i32 }
+	%struct.ov_callbacks = type { ptr, ptr, ptr, ptr }
+	%struct.vorbis_block = type { ptr, %struct.oggpack_buffer, i32, i32, i32, i32, i32, i32, i64, i64, ptr, ptr, i32, i32, i32, ptr, i32, i32, i32, i32, ptr }
+	%struct.vorbis_comment = type { ptr, ptr, i32, ptr }
+	%struct.vorbis_dsp_state = type { i32, ptr, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, ptr }
+	%struct.vorbis_info = type { i32, i32, i32, i32, i32, i32, i32, ptr }
 
 
 define void @ov_read() {

diff  --git a/llvm/test/Analysis/Dominators/2007-07-12-SplitBlock.ll b/llvm/test/Analysis/Dominators/2007-07-12-SplitBlock.ll
index a59bdf5045147..be0a9bf64c29f 100644
--- a/llvm/test/Analysis/Dominators/2007-07-12-SplitBlock.ll
+++ b/llvm/test/Analysis/Dominators/2007-07-12-SplitBlock.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -passes='loop-mssa(loop-rotate,licm,simple-loop-unswitch)' -disable-output
 
-define i32 @main(i32 %argc, i8** %argv) {
+define i32 @main(i32 %argc, ptr %argv) {
 entry:
 	br label %bb7
 

diff  --git a/llvm/test/Analysis/Dominators/invoke.ll b/llvm/test/Analysis/Dominators/invoke.ll
index 383d402eaf0b9..0d928af00656c 100644
--- a/llvm/test/Analysis/Dominators/invoke.ll
+++ b/llvm/test/Analysis/Dominators/invoke.ll
@@ -1,19 +1,19 @@
 ; RUN: opt -passes=verify -disable-output < %s
 ; This tests that we handle unreachable blocks correctly
 
-define void @f() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
-  %v1 = invoke i32* @g()
+define void @f() personality ptr @__gxx_personality_v0 {
+  %v1 = invoke ptr @g()
           to label %bb1 unwind label %bb2
   invoke void @__dynamic_cast()
           to label %bb1 unwind label %bb2
 bb1:
-  %Hidden = getelementptr inbounds i32, i32* %v1, i64 1
+  %Hidden = getelementptr inbounds i32, ptr %v1, i64 1
   ret void
 bb2:
-  %lpad.loopexit80 = landingpad { i8*, i32 }
+  %lpad.loopexit80 = landingpad { ptr, i32 }
           cleanup
   ret void
 }
 declare i32 @__gxx_personality_v0(...)
 declare void @__dynamic_cast()
-declare i32* @g()
+declare ptr @g()

diff  --git a/llvm/test/Analysis/FunctionPropertiesAnalysis/matmul.ll b/llvm/test/Analysis/FunctionPropertiesAnalysis/matmul.ll
index 500cbbc7622bf..cabb55fc448f1 100644
--- a/llvm/test/Analysis/FunctionPropertiesAnalysis/matmul.ll
+++ b/llvm/test/Analysis/FunctionPropertiesAnalysis/matmul.ll
@@ -12,11 +12,11 @@ entry:
   %res = alloca [2 x [2 x i32]], align 16
   %i = alloca i32, align 4
   %j = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  %arraydecay = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %mat1, i64 0, i64 0
-  %arraydecay1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %mat2, i64 0, i64 0
-  %arraydecay2 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %res, i64 0, i64 0
-  call void @multiply([2 x i32]* %arraydecay, [2 x i32]* %arraydecay1, [2 x i32]* %arraydecay2)
+  store i32 0, ptr %retval, align 4
+  %arraydecay = getelementptr inbounds [2 x [2 x i32]], ptr %mat1, i64 0, i64 0
+  %arraydecay1 = getelementptr inbounds [2 x [2 x i32]], ptr %mat2, i64 0, i64 0
+  %arraydecay2 = getelementptr inbounds [2 x [2 x i32]], ptr %res, i64 0, i64 0
+  call void @multiply(ptr %arraydecay, ptr %arraydecay1, ptr %arraydecay2)
   ret i32 0
 }
 ; CHECK-DAG: BasicBlockCount: 1
@@ -71,105 +71,105 @@ entry:
 ; DETAILED-PROPERTIES-DAG: CallWithManyArgumentsCount: 0
 ; DETAILED-PROPERTIES-DAG: CallWithPointerArgumentCount: 1
 
-define void @multiply([2 x i32]* %mat1, [2 x i32]* %mat2, [2 x i32]* %res) {
+define void @multiply(ptr %mat1, ptr %mat2, ptr %res) {
 ; CHECK-DAG: Printing analysis results of CFA for function 'multiply':
 ; DETAILED-PROPERTIES-DAG: Printing analysis results of CFA for function 'multiply':
 entry:
-  %mat1.addr = alloca [2 x i32]*, align 8
-  %mat2.addr = alloca [2 x i32]*, align 8
-  %res.addr = alloca [2 x i32]*, align 8
+  %mat1.addr = alloca ptr, align 8
+  %mat2.addr = alloca ptr, align 8
+  %res.addr = alloca ptr, align 8
   %i = alloca i32, align 4
   %j = alloca i32, align 4
   %k = alloca i32, align 4
-  store [2 x i32]* %mat1, [2 x i32]** %mat1.addr, align 8
-  store [2 x i32]* %mat2, [2 x i32]** %mat2.addr, align 8
-  store [2 x i32]* %res, [2 x i32]** %res.addr, align 8
-  store i32 0, i32* %i, align 4
+  store ptr %mat1, ptr %mat1.addr, align 8
+  store ptr %mat2, ptr %mat2.addr, align 8
+  store ptr %res, ptr %res.addr, align 8
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc24, %entry
-  %0 = load i32, i32* %i, align 4
+  %0 = load i32, ptr %i, align 4
   %cmp = icmp slt i32 %0, 2
   br i1 %cmp, label %for.body, label %for.end26
 
 for.body:                                         ; preds = %for.cond
-  store i32 0, i32* %j, align 4
+  store i32 0, ptr %j, align 4
   br label %for.cond1
 
 for.cond1:                                        ; preds = %for.inc21, %for.body
-  %1 = load i32, i32* %j, align 4
+  %1 = load i32, ptr %j, align 4
   %cmp2 = icmp slt i32 %1, 2
   br i1 %cmp2, label %for.body3, label %for.end23
 
 for.body3:                                        ; preds = %for.cond1
-  %2 = load [2 x i32]*, [2 x i32]** %res.addr, align 8
-  %3 = load i32, i32* %i, align 4
+  %2 = load ptr, ptr %res.addr, align 8
+  %3 = load i32, ptr %i, align 4
   %idxprom = sext i32 %3 to i64
-  %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %2, i64 %idxprom
-  %4 = load i32, i32* %j, align 4
+  %arrayidx = getelementptr inbounds [2 x i32], ptr %2, i64 %idxprom
+  %4 = load i32, ptr %j, align 4
   %idxprom4 = sext i32 %4 to i64
-  %arrayidx5 = getelementptr inbounds [2 x i32], [2 x i32]* %arrayidx, i64 0, i64 %idxprom4
-  store i32 0, i32* %arrayidx5, align 4
-  store i32 0, i32* %k, align 4
+  %arrayidx5 = getelementptr inbounds [2 x i32], ptr %arrayidx, i64 0, i64 %idxprom4
+  store i32 0, ptr %arrayidx5, align 4
+  store i32 0, ptr %k, align 4
   br label %for.cond6
 
 for.cond6:                                        ; preds = %for.inc, %for.body3
-  %5 = load i32, i32* %k, align 4
+  %5 = load i32, ptr %k, align 4
   %cmp7 = icmp slt i32 %5, 2
   br i1 %cmp7, label %for.body8, label %for.end
 
 for.body8:                                        ; preds = %for.cond6
-  %6 = load [2 x i32]*, [2 x i32]** %mat1.addr, align 8
-  %7 = load i32, i32* %i, align 4
+  %6 = load ptr, ptr %mat1.addr, align 8
+  %7 = load i32, ptr %i, align 4
   %idxprom9 = sext i32 %7 to i64
-  %arrayidx10 = getelementptr inbounds [2 x i32], [2 x i32]* %6, i64 %idxprom9
-  %8 = load i32, i32* %k, align 4
+  %arrayidx10 = getelementptr inbounds [2 x i32], ptr %6, i64 %idxprom9
+  %8 = load i32, ptr %k, align 4
   %idxprom11 = sext i32 %8 to i64
-  %arrayidx12 = getelementptr inbounds [2 x i32], [2 x i32]* %arrayidx10, i64 0, i64 %idxprom11
-  %9 = load i32, i32* %arrayidx12, align 4
-  %10 = load [2 x i32]*, [2 x i32]** %mat2.addr, align 8
-  %11 = load i32, i32* %k, align 4
+  %arrayidx12 = getelementptr inbounds [2 x i32], ptr %arrayidx10, i64 0, i64 %idxprom11
+  %9 = load i32, ptr %arrayidx12, align 4
+  %10 = load ptr, ptr %mat2.addr, align 8
+  %11 = load i32, ptr %k, align 4
   %idxprom13 = sext i32 %11 to i64
-  %arrayidx14 = getelementptr inbounds [2 x i32], [2 x i32]* %10, i64 %idxprom13
-  %12 = load i32, i32* %j, align 4
+  %arrayidx14 = getelementptr inbounds [2 x i32], ptr %10, i64 %idxprom13
+  %12 = load i32, ptr %j, align 4
   %idxprom15 = sext i32 %12 to i64
-  %arrayidx16 = getelementptr inbounds [2 x i32], [2 x i32]* %arrayidx14, i64 0, i64 %idxprom15
-  %13 = load i32, i32* %arrayidx16, align 4
+  %arrayidx16 = getelementptr inbounds [2 x i32], ptr %arrayidx14, i64 0, i64 %idxprom15
+  %13 = load i32, ptr %arrayidx16, align 4
   %mul = mul nsw i32 %9, %13
-  %14 = load [2 x i32]*, [2 x i32]** %res.addr, align 8
-  %15 = load i32, i32* %i, align 4
+  %14 = load ptr, ptr %res.addr, align 8
+  %15 = load i32, ptr %i, align 4
   %idxprom17 = sext i32 %15 to i64
-  %arrayidx18 = getelementptr inbounds [2 x i32], [2 x i32]* %14, i64 %idxprom17
-  %16 = load i32, i32* %j, align 4
+  %arrayidx18 = getelementptr inbounds [2 x i32], ptr %14, i64 %idxprom17
+  %16 = load i32, ptr %j, align 4
   %idxprom19 = sext i32 %16 to i64
-  %arrayidx20 = getelementptr inbounds [2 x i32], [2 x i32]* %arrayidx18, i64 0, i64 %idxprom19
-  %17 = load i32, i32* %arrayidx20, align 4
+  %arrayidx20 = getelementptr inbounds [2 x i32], ptr %arrayidx18, i64 0, i64 %idxprom19
+  %17 = load i32, ptr %arrayidx20, align 4
   %add = add nsw i32 %17, %mul
-  store i32 %add, i32* %arrayidx20, align 4
+  store i32 %add, ptr %arrayidx20, align 4
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body8
-  %18 = load i32, i32* %k, align 4
+  %18 = load i32, ptr %k, align 4
   %inc = add nsw i32 %18, 1
-  store i32 %inc, i32* %k, align 4
+  store i32 %inc, ptr %k, align 4
   br label %for.cond6
 
 for.end:                                          ; preds = %for.cond6
   br label %for.inc21
 
 for.inc21:                                        ; preds = %for.end
-  %19 = load i32, i32* %j, align 4
+  %19 = load i32, ptr %j, align 4
   %inc22 = add nsw i32 %19, 1
-  store i32 %inc22, i32* %j, align 4
+  store i32 %inc22, ptr %j, align 4
   br label %for.cond1
 
 for.end23:                                        ; preds = %for.cond1
   br label %for.inc24
 
 for.inc24:                                        ; preds = %for.end23
-  %20 = load i32, i32* %i, align 4
+  %20 = load i32, ptr %i, align 4
   %inc25 = add nsw i32 %20, 1
-  store i32 %inc25, i32* %i, align 4
+  store i32 %inc25, ptr %i, align 4
   br label %for.cond
 
 for.end26:                                        ; preds = %for.cond

diff  --git a/llvm/test/Analysis/IVUsers/deep_recursion_in_scev.ll b/llvm/test/Analysis/IVUsers/deep_recursion_in_scev.ll
index 882bfaf22bbcb..1402f7f3a6bf8 100644
--- a/llvm/test/Analysis/IVUsers/deep_recursion_in_scev.ll
+++ b/llvm/test/Analysis/IVUsers/deep_recursion_in_scev.ll
@@ -4,30 +4,30 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:1"
 target triple = "x86_64-unknown-linux-gnu"
 
-define void @quux(i8 addrspace(1)* %arg, i8 addrspace(1)* %arg1) {
+define void @quux(ptr addrspace(1) %arg, ptr addrspace(1) %arg1) {
 bb:
-  %tmp2 = getelementptr inbounds i8, i8 addrspace(1)* %arg, i64 80
-  %tmp3 = bitcast i8 addrspace(1)* %tmp2 to i8 addrspace(1)* addrspace(1)*
-  %tmp4 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %tmp3, align 8
-  %tmp5 = getelementptr inbounds i8, i8 addrspace(1)* %tmp4, i64 8
-  %tmp6 = bitcast i8 addrspace(1)* %tmp5 to i8 addrspace(1)* addrspace(1)*
-  %tmp7 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %tmp6, align 8
-  %tmp8 = getelementptr inbounds i8, i8 addrspace(1)* %tmp7, i64 8
-  %tmp9 = bitcast i8 addrspace(1)* %tmp8 to i32 addrspace(1)*
-  %tmp10 = load i32, i32 addrspace(1)* %tmp9, align 8
+  %tmp2 = getelementptr inbounds i8, ptr addrspace(1) %arg, i64 80
+  %tmp3 = bitcast ptr addrspace(1) %tmp2 to ptr addrspace(1)
+  %tmp4 = load ptr addrspace(1), ptr addrspace(1) %tmp3, align 8
+  %tmp5 = getelementptr inbounds i8, ptr addrspace(1) %tmp4, i64 8
+  %tmp6 = bitcast ptr addrspace(1) %tmp5 to ptr addrspace(1)
+  %tmp7 = load ptr addrspace(1), ptr addrspace(1) %tmp6, align 8
+  %tmp8 = getelementptr inbounds i8, ptr addrspace(1) %tmp7, i64 8
+  %tmp9 = bitcast ptr addrspace(1) %tmp8 to ptr addrspace(1)
+  %tmp10 = load i32, ptr addrspace(1) %tmp9, align 8
   %tmp11 = udiv i32 65, %tmp10
-  %tmp12 = getelementptr inbounds i8, i8 addrspace(1)* %arg, i64 80
-  %tmp13 = bitcast i8 addrspace(1)* %tmp12 to i8 addrspace(1)* addrspace(1)*
-  %tmp14 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %tmp13, align 8
-  %tmp15 = getelementptr inbounds i8, i8 addrspace(1)* %tmp14, i64 8
-  %tmp16 = bitcast i8 addrspace(1)* %tmp15 to i8 addrspace(1)* addrspace(1)*
-  %tmp17 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %tmp16, align 8
-  %tmp18 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 8
-  %tmp19 = bitcast i8 addrspace(1)* %tmp18 to i32 addrspace(1)*
-  %tmp20 = load i32, i32 addrspace(1)* %tmp19, align 8, !range !0
-  %tmp21 = getelementptr inbounds i8, i8 addrspace(1)* %tmp17, i64 8
-  %tmp22 = bitcast i8 addrspace(1)* %tmp21 to i32 addrspace(1)*
-  %tmp23 = load i32, i32 addrspace(1)* %tmp22, align 8, !range !0
+  %tmp12 = getelementptr inbounds i8, ptr addrspace(1) %arg, i64 80
+  %tmp13 = bitcast ptr addrspace(1) %tmp12 to ptr addrspace(1)
+  %tmp14 = load ptr addrspace(1), ptr addrspace(1) %tmp13, align 8
+  %tmp15 = getelementptr inbounds i8, ptr addrspace(1) %tmp14, i64 8
+  %tmp16 = bitcast ptr addrspace(1) %tmp15 to ptr addrspace(1)
+  %tmp17 = load ptr addrspace(1), ptr addrspace(1) %tmp16, align 8
+  %tmp18 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 8
+  %tmp19 = bitcast ptr addrspace(1) %tmp18 to ptr addrspace(1)
+  %tmp20 = load i32, ptr addrspace(1) %tmp19, align 8, !range !0
+  %tmp21 = getelementptr inbounds i8, ptr addrspace(1) %tmp17, i64 8
+  %tmp22 = bitcast ptr addrspace(1) %tmp21 to ptr addrspace(1)
+  %tmp23 = load i32, ptr addrspace(1) %tmp22, align 8, !range !0
   %tmp24 = zext i32 %tmp23 to i64
   %tmp25 = and i32 %tmp11, 7
   %tmp26 = icmp ugt i32 %tmp10, 9

diff  --git a/llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll b/llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll
index d754517a51f6c..f794807edb611 100644
--- a/llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll
+++ b/llvm/test/Analysis/LazyCallGraph/non-leaf-intrinsics.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -S -disable-output -passes=print-lcg < %s 2>&1 | FileCheck %s
 
 declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, ptr, i32, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
 
 define private void @f() {
   ret void
@@ -11,7 +11,7 @@ define void @calls_statepoint(ptr addrspace(1) %arg) gc "statepoint-example" {
 ; CHECK: Edges in function: calls_statepoint
 ; CHECK-NEXT:  -> f
 entry:
-  %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, ptr elementtype(void ()) @f, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %arg, ptr addrspace(1) %arg, ptr addrspace(1) %arg, ptr addrspace(1) %arg), "deopt" (i32 0, i32 0, i32 0, i32 10, i32 0)]
+  %safepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @f, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %arg, ptr addrspace(1) %arg, ptr addrspace(1) %arg, ptr addrspace(1) %arg), "deopt" (i32 0, i32 0, i32 0, i32 10, i32 0)]
   ret void
 }
 

diff  --git a/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll b/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
index 1e2d8ad72a030..71ea5d2ec1dae 100644
--- a/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
+++ b/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
@@ -17,19 +17,19 @@ target triple = "x86_64-unknown-linux-gnu"
 
 @.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
 
-declare void @hoo(i64*)
+declare void @hoo(ptr)
 
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 
-define void @goo(i32 %N, i64* %b) {
+define void @goo(i32 %N, ptr %b) {
 entry:
   %a.i = alloca i64, align 8
-  %tmp = bitcast i64* %a.i to i8*
-  %c = getelementptr inbounds i64, i64* %b, i64 0
+  %tmp = bitcast ptr %a.i to ptr
+  %c = getelementptr inbounds i64, ptr %b, i64 0
   br label %for.cond
 
 for.cond:                                         ; preds = %for.body, %entry
@@ -38,12 +38,12 @@ for.cond:                                         ; preds = %for.body, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* %tmp)
-  call void @hoo(i64* %a.i)
-  call void @hoo(i64* %c)
-  %tmp1 = load volatile i64, i64* %a.i, align 8
-  %call.i = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i64 %tmp1)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* %tmp)
+  call void @llvm.lifetime.start.p0(i64 8, ptr %tmp)
+  call void @hoo(ptr %a.i)
+  call void @hoo(ptr %c)
+  %tmp1 = load volatile i64, ptr %a.i, align 8
+  %call.i = call i32 (ptr, ...) @printf(ptr @.str, i64 %tmp1)
+  call void @llvm.lifetime.end.p0(i64 8, ptr %tmp)
   %inc = add nsw i32 %i.0, 1
   br label %for.cond
 

diff  --git a/llvm/test/Analysis/LoopAccessAnalysis/forked-pointers.ll b/llvm/test/Analysis/LoopAccessAnalysis/forked-pointers.ll
index 848302bc24025..cd388b4ee87f2 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/forked-pointers.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/forked-pointers.ll
@@ -934,7 +934,7 @@ for.body:                                         ; preds = %entry, %for.body
 ;;;; Derived from forked_ptrs_same_base_
diff erent_offset with a manually
 ;;;; added uniform offset and a mul to provide a stride
 
-define dso_local void @forked_ptrs_uniform_and_strided_forks(float* nocapture readonly %Base, float* nocapture %Dest, i32* nocapture readonly %Preds) {
+define dso_local void @forked_ptrs_uniform_and_strided_forks(ptr nocapture readonly %Base, ptr nocapture %Dest, ptr nocapture readonly %Preds) {
 ; CHECK-LABEL: 'forked_ptrs_uniform_and_strided_forks'
 ; CHECK-NEXT:    for.body:
 ; CHECK-NEXT:      Report: cannot identify array bounds
@@ -1073,7 +1073,7 @@ for.cond.cleanup:
 
 for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %Preds, i64 %indvars.iv
+  %arrayidx = getelementptr inbounds i32, ptr %Preds, i64 %indvars.iv
   %0 = load i32, ptr %arrayidx, align 4
   %cmp1.not = icmp eq i32 %0, 0
   %spec.select = select i1 %cmp1.not, ptr %Base2, ptr %Base1

diff  --git a/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll b/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll
index f72a2a9c8414a..abfdff79dc113 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll
@@ -119,14 +119,14 @@ for_j.body:
   %gepCurr = getelementptr inbounds i8, ptr %curr, i64 %j
   %gepB = getelementptr inbounds i8, ptr %B, i64 %j
   %gepB1 = getelementptr inbounds i8, ptr %gepB, i64 %j
-  %gepB2 = getelementptr inbounds i8, i8* %gepB1, i64 0
-  %gepB3 = getelementptr inbounds i8, i8* %gepB2, i64 0
-  %gepB4 = getelementptr inbounds i8, i8* %gepB3, i64 0
-  %gepB5 = getelementptr inbounds i8, i8* %gepB4, i64 0
-  %gepB6 = getelementptr inbounds i8, i8* %gepB5, i64 0
-  %gepB7 = getelementptr inbounds i8, i8* %gepB6, i64 0
-  %gepB8 = getelementptr inbounds i8, i8* %gepB7, i64 0
-  %gepB9 = getelementptr inbounds i8, i8* %gepB8, i64 0
+  %gepB2 = getelementptr inbounds i8, ptr %gepB1, i64 0
+  %gepB3 = getelementptr inbounds i8, ptr %gepB2, i64 0
+  %gepB4 = getelementptr inbounds i8, ptr %gepB3, i64 0
+  %gepB5 = getelementptr inbounds i8, ptr %gepB4, i64 0
+  %gepB6 = getelementptr inbounds i8, ptr %gepB5, i64 0
+  %gepB7 = getelementptr inbounds i8, ptr %gepB6, i64 0
+  %gepB8 = getelementptr inbounds i8, ptr %gepB7, i64 0
+  %gepB9 = getelementptr inbounds i8, ptr %gepB8, i64 0
 
   %loadPrev = load i8, ptr %gepPrev, align 1
   %loadB = load i8, ptr %gepB9, align 1

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll
index 47ac25f68903d..e15f06843500e 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll
@@ -17,7 +17,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ;;       a[i][j] = a[i+1][j-2];
 ;; }
 
-define void @t1([2048 x i32]* %a) {
+define void @t1(ptr %a) {
 entry:
   br label %for.body
 
@@ -29,11 +29,11 @@ for.body4:                                        ; preds = %for.body, %for.body
   %indvars.iv = phi i64 [ 2, %for.body ], [ %indvars.iv.next, %for.body4 ]
   %0 = add nuw nsw i64 %indvars.iv4, 1
   %1 = add nsw i64 %indvars.iv, -2
-  %arrayidx6 = getelementptr inbounds [2048 x i32], [2048 x i32]* %a, i64 %0, i64 %1
-  %2 = load i32, i32* %arrayidx6, align 4
-  %a_gep = getelementptr inbounds [2048 x i32], [2048 x i32]* %a, i64 0
-  %arrayidx10 = getelementptr inbounds [2048 x i32], [2048 x i32]* %a_gep, i64 %indvars.iv4, i64 %indvars.iv
-  store i32 %2, i32* %arrayidx10, align 4
+  %arrayidx6 = getelementptr inbounds [2048 x i32], ptr %a, i64 %0, i64 %1
+  %2 = load i32, ptr %arrayidx6, align 4
+  %a_gep = getelementptr inbounds [2048 x i32], ptr %a, i64 0
+  %arrayidx10 = getelementptr inbounds [2048 x i32], ptr %a_gep, i64 %indvars.iv4, i64 %indvars.iv
+  store i32 %2, ptr %arrayidx10, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp ne i64 %indvars.iv.next, 2048
   br i1 %exitcond, label %for.body4, label %for.inc11
@@ -51,7 +51,7 @@ for.end13:                                        ; preds = %for.inc11
 ; CHECK: Loop 'for.body' has cost = 4186116
 ; CHECK-NEXT: Loop 'for.body4' has cost = 128898
 
-define void @t2([2048 x i32]* %a) {
+define void @t2(ptr %a) {
 entry:
   br label %for.body
 
@@ -63,11 +63,11 @@ for.body4:                                        ; preds = %for.body, %for.body
   %indvars.iv = phi i64 [ 2, %for.body ], [ %indvars.iv.next, %for.body4 ]
   %0 = add nuw nsw i64 %indvars.iv4, 1
   %1 = add nsw i64 %indvars.iv, -2
-  %arrayidx6 = getelementptr inbounds [2048 x i32], [2048 x i32]* %a, i64 %0, i64 %1
-  %2 = load i32, i32* %arrayidx6, align 4
-  %call = call [2048 x i32]* @func_with_returned_arg([2048 x i32]* returned %a)
-  %arrayidx10 = getelementptr inbounds [2048 x i32], [2048 x i32]* %call, i64 %indvars.iv4, i64 %indvars.iv
-  store i32 %2, i32* %arrayidx10, align 4
+  %arrayidx6 = getelementptr inbounds [2048 x i32], ptr %a, i64 %0, i64 %1
+  %2 = load i32, ptr %arrayidx6, align 4
+  %call = call ptr @func_with_returned_arg(ptr returned %a)
+  %arrayidx10 = getelementptr inbounds [2048 x i32], ptr %call, i64 %indvars.iv4, i64 %indvars.iv
+  store i32 %2, ptr %arrayidx10, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp ne i64 %indvars.iv.next, 2048
   br i1 %exitcond, label %for.body4, label %for.inc11
@@ -81,7 +81,7 @@ for.end13:                                        ; preds = %for.inc11
   ret void
 }
 
-declare [2048 x i32]* @func_with_returned_arg([2048 x i32]* returned %arg)
+declare ptr @func_with_returned_arg(ptr returned %arg)
 
 ; CHECK: Loop 'for.body' has cost = 2112128815104000000
 ; CHECK-NEXT: Loop 'for.body4' has cost = 16762927104000000
@@ -100,7 +100,7 @@ declare [2048 x i32]* @func_with_returned_arg([2048 x i32]* returned %arg)
 ;;             a[i1][i2][i3][i4][i5] = a[i1+1][i2-2][i3][i4-3][i5+2];
 ;; }
 
-define void @t3([128 x [128 x [128 x [2048 x i32]]]]* %a) {
+define void @t3(ptr %a) {
 entry:
   br label %for.body
 
@@ -126,10 +126,10 @@ for.body16:                                       ; preds = %for.body12, %for.bo
   %1 = add nsw i64 %indvars.iv14, -2
   %2 = add nsw i64 %indvars.iv7, -3
   %3 = add nuw nsw i64 %indvars.iv, 2
-  %arrayidx26 = getelementptr inbounds [128 x [128 x [128 x [2048 x i32]]]], [128 x [128 x [128 x [2048 x i32]]]]* %a, i64 %0, i64 %1, i64 %indvars.iv11, i64 %2, i64 %3
-  %4 = load i32, i32* %arrayidx26, align 4
-  %arrayidx36 = getelementptr inbounds [128 x [128 x [128 x [2048 x i32]]]], [128 x [128 x [128 x [2048 x i32]]]]* %a, i64 %indvars.iv18, i64 %indvars.iv14, i64 %indvars.iv11, i64 %indvars.iv7, i64 %indvars.iv
-  store i32 %4, i32* %arrayidx36, align 4
+  %arrayidx26 = getelementptr inbounds [128 x [128 x [128 x [2048 x i32]]]], ptr %a, i64 %0, i64 %1, i64 %indvars.iv11, i64 %2, i64 %3
+  %4 = load i32, ptr %arrayidx26, align 4
+  %arrayidx36 = getelementptr inbounds [128 x [128 x [128 x [2048 x i32]]]], ptr %a, i64 %indvars.iv18, i64 %indvars.iv14, i64 %indvars.iv11, i64 %indvars.iv7, i64 %indvars.iv
+  store i32 %4, ptr %arrayidx36, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp ne i64 %indvars.iv.next, 2046
   br i1 %exitcond, label %for.body16, label %for.inc37

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost-m32.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost-m32.ll
index cf24705a17fb1..2f25f12a13b5b 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost-m32.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost-m32.ll
@@ -10,15 +10,15 @@ target triple = "powerpc-ibm-aix7.2.0.0"
 
 %_elem_type_of_v = type <{ i32 }>
 
-define signext i32 @foo(%_elem_type_of_v* %v) {
+define signext i32 @foo(ptr %v) {
 _entry:
   br label %_loop_1_do_
 
 _loop_1_do_:                                      ; preds = %_entry, %_loop_1_do_
   %i.011 = phi i64 [ 1, %_entry ], [ %_loop_1_update_loop_ix, %_loop_1_do_ ]
   %_conv = trunc i64 %i.011 to i32
-  %_ind_cast = getelementptr %_elem_type_of_v, %_elem_type_of_v* %v, i32 %_conv, i32 0
-  store i32 %_conv, i32* %_ind_cast, align 4
+  %_ind_cast = getelementptr %_elem_type_of_v, ptr %v, i32 %_conv, i32 0
+  store i32 %_conv, ptr %_ind_cast, align 4
   %_loop_1_update_loop_ix = add nuw nsw i64 %i.011, 1
   %_leq_tmp = icmp ult i64 %_loop_1_update_loop_ix, 33
   br i1 %_leq_tmp, label %_loop_1_do_, label %_loop_1_endl_

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll
index c6510970e6532..87f522c982544 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll
@@ -8,9 +8,9 @@ target triple = "powerpc64le-unknown-linux-gnu"
 
 ; CHECK: Loop 'for.cond' has cost = 64
 
-%struct._Handleitem = type { %struct._Handleitem* }
+%struct._Handleitem = type { ptr }
 
-define void @handle_to_ptr(%struct._Handleitem** %blocks) {
+define void @handle_to_ptr(ptr %blocks) {
 ; Preheader:
 entry:
   br label %for.cond
@@ -23,8 +23,8 @@ for.cond:                                         ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.cond
   %idxprom = zext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds %struct._Handleitem*, %struct._Handleitem** %blocks, i64 %idxprom
-  store %struct._Handleitem* null, %struct._Handleitem** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %blocks, i64 %idxprom
+  store ptr null, ptr %arrayidx, align 8
   %inc = add nuw nsw i32 %i.0, 1
   br label %for.cond
 
@@ -86,7 +86,7 @@ for.end19:
 
 ; CHECK: Loop 'for.neg.cond' has cost = 64
 
-define void @handle_to_ptr_neg_stride(%struct._Handleitem** %blocks) {
+define void @handle_to_ptr_neg_stride(ptr %blocks) {
 ; Preheader:
 entry:
   br label %for.neg.cond
@@ -99,8 +99,8 @@ for.neg.cond:                                         ; preds = %for.neg.body, %
 
 for.neg.body:                                         ; preds = %for.neg.cond
   %idxprom = zext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds %struct._Handleitem*, %struct._Handleitem** %blocks, i64 %idxprom
-  store %struct._Handleitem* null, %struct._Handleitem** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %blocks, i64 %idxprom
+  store ptr null, ptr %arrayidx, align 8
   %dec = add nsw i32 %i.0, -1
   br label %for.neg.cond
 
@@ -119,7 +119,7 @@ for.neg.end:                                          ; preds = %for.neg.cond
 ; approximately 2x higher.
 
 ; CHECK: Loop 'for.cond2' has cost = 2560
-define void @Test2(double* %B) {
+define void @Test2(ptr %B) {
 entry:
   br label %for.cond2
 
@@ -131,11 +131,11 @@ for.cond2:                                         ; preds = %for.body, %entry
 for.body:                                         ; preds = %for.cond
   %sub = sub nsw i32 40960, %i.0
   %idxprom = sext i32 %sub to i64
-  %arrayidx = getelementptr inbounds double, double* %B, i64 %idxprom
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %B, i64 %idxprom
+  %0 = load double, ptr %arrayidx, align 8
   %idxprom1 = sext i32 %i.0 to i64
-  %arrayidx2 = getelementptr inbounds double, double* %B, i64 %idxprom1
-  store double %0, double* %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %B, i64 %idxprom1
+  store double %0, ptr %arrayidx2, align 8
   %dec = add nsw i32 %i.0, -1
   br label %for.cond2
 
@@ -149,7 +149,7 @@ for.end:                                          ; preds = %for.cond
 ;     C[i] = C[i];
 
 ; CHECK: Loop 'for.cond3' has cost = 2560
-define void @Test3(double** %C) {
+define void @Test3(ptr %C) {
 entry:
   br label %for.cond3
 
@@ -160,11 +160,11 @@ for.cond3:                                         ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.cond
   %idxprom = sext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds double*, double** %C, i64 %idxprom
-  %0 = load double*, double** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %C, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom1 = sext i32 %i.0 to i64
-  %arrayidx2 = getelementptr inbounds double*, double** %C, i64 %idxprom1
-  store double* %0, double** %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds ptr, ptr %C, i64 %idxprom1
+  store ptr %0, ptr %arrayidx2, align 8
   %dec = add nsw i32 %i.0, -1
   br label %for.cond3
 
@@ -178,7 +178,7 @@ for.end:                                          ; preds = %for.cond
 ;     D[i] = D[i];
 
 ; CHECK: Loop 'for.cond4' has cost = 2560
-define void @Test4(double** %D) {
+define void @Test4(ptr %D) {
 entry:
   br label %for.cond4
 
@@ -189,11 +189,11 @@ for.cond4:                                         ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.cond
   %idxprom = sext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds double*, double** %D, i64 %idxprom
-  %0 = load double*, double** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %D, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom1 = sext i32 %i.0 to i64
-  %arrayidx2 = getelementptr inbounds double*, double** %D, i64 %idxprom1
-  store double* %0, double** %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds ptr, ptr %D, i64 %idxprom1
+  store ptr %0, ptr %arrayidx2, align 8
   %inc = add nsw i32 %i.0, 1
   br label %for.cond4
 

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll
index 3eb21c25f4bfa..39fe382a41196 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll
@@ -14,7 +14,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; CHECK-NEXT: Loop 'for.k' has cost = 2030000
 ; CHECK-NEXT: Loop 'for.j' has cost = 1060000
 
-define void @foo(i64 %n, i64 %m, i64 %o, i32* %A, i32* %B, i32* %C) {
+define void @foo(i64 %n, i64 %m, i64 %o, ptr %A, ptr %B, ptr %C) {
 entry:
   %cmp32 = icmp sgt i64 %n, 0
   %cmp230 = icmp sgt i64 %m, 0
@@ -48,23 +48,23 @@ for.k:                                            ; preds = %for.k, %for.j
   %addk = add i64 %muli, %k
   %mulk = mul i64 %addk, %o
   %arrayidx1 = add i64 %j, %mulk
-  %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %arrayidx1
-  %elem_B = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %arrayidx1
+  %elem_B = load i32, ptr %arrayidx2, align 4
 
   ; C[i][j][k]
   %arrayidx3 = add i64 %k, %mulj
-  %arrayidx4 = getelementptr inbounds i32, i32* %C, i64 %arrayidx3
-  %elem_C = load i32, i32* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %C, i64 %arrayidx3
+  %elem_C = load i32, ptr %arrayidx4, align 4
 
   ; A[i][k][j]
-  %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %arrayidx1
-  %elem_A = load i32, i32* %arrayidx5, align 4
+  %arrayidx5 = getelementptr inbounds i32, ptr %A, i64 %arrayidx1
+  %elem_A = load i32, ptr %arrayidx5, align 4
 
   ; A[i][k][j] += B[i][k][j] + C[i][j][k]
   %add1 = add i32 %elem_B, %elem_C
   %add2 = add i32 %add1, %elem_A
-  %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %arrayidx1
-  store i32 %add2, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %A, i64 %arrayidx1
+  store i32 %add2, ptr %arrayidx6, align 4
 
   %inck = add nsw i64 %k, 1
   %exitcond.us = icmp eq i64 %inck, %o

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll
index 08ceb569eac4a..9538c3c93538a 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll
@@ -14,7 +14,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; CHECK-NEXT:Loop 'for.k' has cost = 1040000
 ; CHECK-NEXT:Loop 'for.j' has cost = 70000
     
-define void @matmul(i64 %n, i64 %m, i64 %o, i32* %A, i32* %B, i32* %C) {
+define void @matmul(i64 %n, i64 %m, i64 %o, ptr %A, ptr %B, ptr %C) {
 entry:
   br label %for.i
 
@@ -34,24 +34,24 @@ for.k:                                        ; preds = %for.j, %for.inc.k
 
   ; A[i][k]
   %arrayidx3 = add i64 %k, %muli
-  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %arrayidx3
-  %elem_A = load i32, i32* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 %arrayidx3
+  %elem_A = load i32, ptr %arrayidx4, align 4
 
   ; B[k][j]
   %mulk = mul i64 %k, %o
   %arrayidx5 = add i64 %j, %mulk
-  %arrayidx6 = getelementptr inbounds i32, i32* %B, i64 %arrayidx5
-  %elem_B = load i32, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %B, i64 %arrayidx5
+  %elem_B = load i32, ptr %arrayidx6, align 4
 
   ; C[i][k]
   %arrayidx7 = add i64 %j, %muli
-  %arrayidx8 = getelementptr inbounds i32, i32* %C, i64 %arrayidx7
-  %elem_C = load i32, i32* %arrayidx8, align 4
+  %arrayidx8 = getelementptr inbounds i32, ptr %C, i64 %arrayidx7
+  %elem_C = load i32, ptr %arrayidx8, align 4
 
   ; C[i][j] = C[i][j] + A[i][k] * B[k][j];
   %mul = mul nsw i32 %elem_A, %elem_B
   %add = add nsw i32 %elem_C, %mul
-  store i32 %add, i32* %arrayidx8, align 4
+  store i32 %add, ptr %arrayidx8, align 4
 
   br label %for.inc.k
 

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll
index a39dff4460c46..7bbbe43f5a2fc 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll
@@ -23,23 +23,23 @@ target triple = "powerpc64le-unknown-linux-gnu"
 %_elem_type_of_double = type <{ double }>
 
 ; Function Attrs: norecurse nounwind
-define void @mat_vec_mpy([0 x %_elem_type_of_double]* noalias %y, [0 x %_elem_type_of_double]* noalias readonly %x,
-    [0 x %_elem_type_of_double]* noalias readonly %b, i32* noalias readonly %nb, i32* noalias readonly %nx, 
-    i32* noalias readonly %ny, i32* noalias readonly %nz) {
+define void @mat_vec_mpy(ptr noalias %y, ptr noalias readonly %x,
+    ptr noalias readonly %b, ptr noalias readonly %nb, ptr noalias readonly %nx, 
+    ptr noalias readonly %ny, ptr noalias readonly %nz) {
 mat_times_vec_entry:
-  %_ind_val = load i32, i32* %nb, align 4
+  %_ind_val = load i32, ptr %nb, align 4
   %_conv = sext i32 %_ind_val to i64
   %_grt_tmp.i = icmp sgt i64 %_conv, 0
   %a_b.i = select i1 %_grt_tmp.i, i64 %_conv, i64 0
-  %_ind_val1 = load i32, i32* %nx, align 4
+  %_ind_val1 = load i32, ptr %nx, align 4
   %_conv2 = sext i32 %_ind_val1 to i64
   %_grt_tmp.i266 = icmp sgt i64 %_conv2, 0
   %a_b.i267 = select i1 %_grt_tmp.i266, i64 %_conv2, i64 0
-  %_ind_val3 = load i32, i32* %ny, align 4
+  %_ind_val3 = load i32, ptr %ny, align 4
   %_conv4 = sext i32 %_ind_val3 to i64
   %_grt_tmp.i264 = icmp sgt i64 %_conv4, 0
   %a_b.i265 = select i1 %_grt_tmp.i264, i64 %_conv4, i64 0
-  %_ind_val5 = load i32, i32* %nz, align 4
+  %_ind_val5 = load i32, ptr %nz, align 4
   %_mult_tmp = shl nsw i64 %a_b.i, 3
   %_mult_tmp7 = mul i64 %_mult_tmp, %a_b.i267
   %_mult_tmp8 = mul i64 %_mult_tmp7, %a_b.i265
@@ -58,12 +58,12 @@ mat_times_vec_entry:
 k_loop.lr.ph:                                     ; preds = %mat_times_vec_entry
   %_grt_tmp851279 = icmp slt i32 %_ind_val3, 1
   %_grt_tmp847270 = icmp slt i32 %_ind_val, 1
-  %_aa_conv = bitcast [0 x %_elem_type_of_double]* %y to i8*
-  %_adda_ = getelementptr inbounds i8, i8* %_aa_conv, i64 %_sub_tmp23
-  %_aa_conv434 = bitcast [0 x %_elem_type_of_double]* %x to i8*
-  %_adda_435 = getelementptr inbounds i8, i8* %_aa_conv434, i64 %_sub_tmp23
-  %_aa_conv785 = bitcast [0 x %_elem_type_of_double]* %b to i8*
-  %_adda_786 = getelementptr inbounds i8, i8* %_aa_conv785, i64 %_sub_tmp97
+  %_aa_conv = bitcast ptr %y to ptr
+  %_adda_ = getelementptr inbounds i8, ptr %_aa_conv, i64 %_sub_tmp23
+  %_aa_conv434 = bitcast ptr %x to ptr
+  %_adda_435 = getelementptr inbounds i8, ptr %_aa_conv434, i64 %_sub_tmp23
+  %_aa_conv785 = bitcast ptr %b to ptr
+  %_adda_786 = getelementptr inbounds i8, ptr %_aa_conv785, i64 %_sub_tmp97
   br i1 %_grt_tmp851279, label %k_loop.us.preheader, label %k_loop.lr.ph.split
 
 k_loop.us.preheader:                              ; preds = %k_loop.lr.ph
@@ -118,26 +118,26 @@ m_loop:                                           ; preds = %m_loop, %l_loop
   %indvars.iv = phi i64 [ %indvars.iv.next, %m_loop ], [ 1, %l_loop ]
   %_ix_x_len424 = mul i64 %_mult_tmp, %indvars.iv
   %_ix_x_len454 = shl nuw nsw i64 %indvars.iv, 3
-  %_ixa_gep = getelementptr inbounds i8, i8* %_adda_, i64 %_ix_x_len
-  %_ixa_gep791 = getelementptr inbounds i8, i8* %_adda_786, i64 %_ix_x_len410
-  %_ixa_gep823 = getelementptr inbounds i8, i8* %_adda_435, i64 %_ix_x_len822
-  %_ixa_gep372 = getelementptr inbounds i8, i8* %_ixa_gep, i64 %_ix_x_len371
-  %_ixa_gep376 = getelementptr inbounds i8, i8* %_ixa_gep372, i64 %_ix_x_len375
-  %_ixa_gep796 = getelementptr inbounds i8, i8* %_ixa_gep791, i64 %_ix_x_len415
-  %_ixa_gep828 = getelementptr inbounds i8, i8* %_ixa_gep823, i64 %_ix_x_len371
-  %_ixa_gep379 = getelementptr inbounds i8, i8* %_ixa_gep376, i64 %_ix_x_len378
-  %_ixa_gep801 = getelementptr inbounds i8, i8* %_ixa_gep796, i64 %_ix_x_len420
-  %_ixa_gep833 = getelementptr inbounds i8, i8* %_ixa_gep828, i64 %_ix_x_len375
-  %_ixa_gep806 = getelementptr inbounds i8, i8* %_ixa_gep801, i64 %_ix_x_len378
-  %_ixa_gep810 = getelementptr inbounds i8, i8* %_ixa_gep806, i64 %_ix_x_len424
-  %_gepp = bitcast i8* %_ixa_gep379 to double*
-  %_gepp813 = bitcast i8* %_ixa_gep810 to double*
-  %_ind_val814 = load double, double* %_gepp813, align 8
-  %_ixa_gep837 = getelementptr inbounds i8, i8* %_ixa_gep833, i64 %_ix_x_len454
-  %_gepp840 = bitcast i8* %_ixa_gep837 to double*
-  %_ind_val841 = load double, double* %_gepp840, align 8
+  %_ixa_gep = getelementptr inbounds i8, ptr %_adda_, i64 %_ix_x_len
+  %_ixa_gep791 = getelementptr inbounds i8, ptr %_adda_786, i64 %_ix_x_len410
+  %_ixa_gep823 = getelementptr inbounds i8, ptr %_adda_435, i64 %_ix_x_len822
+  %_ixa_gep372 = getelementptr inbounds i8, ptr %_ixa_gep, i64 %_ix_x_len371
+  %_ixa_gep376 = getelementptr inbounds i8, ptr %_ixa_gep372, i64 %_ix_x_len375
+  %_ixa_gep796 = getelementptr inbounds i8, ptr %_ixa_gep791, i64 %_ix_x_len415
+  %_ixa_gep828 = getelementptr inbounds i8, ptr %_ixa_gep823, i64 %_ix_x_len371
+  %_ixa_gep379 = getelementptr inbounds i8, ptr %_ixa_gep376, i64 %_ix_x_len378
+  %_ixa_gep801 = getelementptr inbounds i8, ptr %_ixa_gep796, i64 %_ix_x_len420
+  %_ixa_gep833 = getelementptr inbounds i8, ptr %_ixa_gep828, i64 %_ix_x_len375
+  %_ixa_gep806 = getelementptr inbounds i8, ptr %_ixa_gep801, i64 %_ix_x_len378
+  %_ixa_gep810 = getelementptr inbounds i8, ptr %_ixa_gep806, i64 %_ix_x_len424
+  %_gepp = bitcast ptr %_ixa_gep379 to ptr
+  %_gepp813 = bitcast ptr %_ixa_gep810 to ptr
+  %_ind_val814 = load double, ptr %_gepp813, align 8
+  %_ixa_gep837 = getelementptr inbounds i8, ptr %_ixa_gep833, i64 %_ix_x_len454
+  %_gepp840 = bitcast ptr %_ixa_gep837 to ptr
+  %_ind_val841 = load double, ptr %_gepp840, align 8
   %_mult_tmp842 = fmul double %_ind_val814, %_ind_val841
-  store double %_mult_tmp842, double* %_gepp, align 8
+  store double %_mult_tmp842, ptr %_gepp, align 8
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %wide.trip.count = zext i32 %0 to i64
   %wide.trip.count305 = zext i32 %0 to i64

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll
index 9a060e8d6249e..f583822579cf9 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll
@@ -14,7 +14,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; CHECK-NEXT: Loop 'for.j' has cost = 1000000
 ; CHECK-NEXT: Loop 'for.k' has cost = 60000
 
-define void @foo(i64 %n, i64 %m, i64 %o, i32* %A) {
+define void @foo(i64 %n, i64 %m, i64 %o, ptr %A) {
 entry:
   %cmp32 = icmp sgt i64 %n, 0
   %cmp230 = icmp sgt i64 %m, 0
@@ -51,8 +51,8 @@ for.k:                                            ; preds = %for.k, %for.j.us
   %mul = mul nsw i64 %k, 2
   %arrayidx.sum = add i64 %mul, 7
   %arrayidx10.sum = add i64 %arrayidx.sum, %tmp27
-  %arrayidx11 = getelementptr inbounds i32, i32* %A, i64 %arrayidx10.sum
-  store i32 1, i32* %arrayidx11, align 4
+  %arrayidx11 = getelementptr inbounds i32, ptr %A, i64 %arrayidx10.sum
+  store i32 1, ptr %arrayidx11, align 4
 
   %inck = add nsw i64 %k, 1
   %exitcond.us = icmp eq i64 %inck, %o
@@ -92,7 +92,7 @@ for.end:                                          ; preds = %for.end.loopexit, %
 ; CHECK-NEXT: Loop 'for.j' has cost = 1000000
 ; CHECK-NEXT: Loop 'for.k' has cost = 60000
 
-define void @foo2(i64 %n, i64 %m, i64 %o, i32* %A) {
+define void @foo2(i64 %n, i64 %m, i64 %o, ptr %A) {
 entry:
   %cmp32 = icmp sgt i64 %n, 0
   %cmp230 = icmp sgt i64 %m, 0
@@ -129,8 +129,8 @@ for.k:                                            ; preds = %for.k, %for.i
   %mul = mul nsw i64 %k, 2
   %arrayidx.sum = add i64 %mul, 7
   %arrayidx10.sum = add i64 %arrayidx.sum, %tmp27
-  %arrayidx11 = getelementptr inbounds i32, i32* %A, i64 %arrayidx10.sum
-  store i32 1, i32* %arrayidx11, align 4
+  %arrayidx11 = getelementptr inbounds i32, ptr %A, i64 %arrayidx10.sum
+  store i32 1, ptr %arrayidx11, align 4
 
   %inck = add nsw i64 %k, 1
   %exitcond.us = icmp eq i64 %inck, %o

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll
index b799595f65a57..b79a47aed1ef0 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll
@@ -14,7 +14,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
 ; CHECK: Loop 'for.i' has cost = 20600
 ; CHECK-NEXT: Loop 'for.j' has cost = 800
 
-define void @foo(i64 %n, i64 %m, i32* %A, i32* %B, i32* %C) {
+define void @foo(i64 %n, i64 %m, ptr %A, ptr %B, ptr %C) {
 entry:
   %cmp32 = icmp sgt i64 %n, 0
   %cmp230 = icmp sgt i64 %m, 0
@@ -41,41 +41,41 @@ for.j:                                            ; preds = %for.incj, %for.i
 
   ; B[i-1][j]
   %arrayidx1 = add i64 %j, %muliminusone
-  %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %arrayidx1
-  %elem_B1 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %B, i64 %arrayidx1
+  %elem_B1 = load i32, ptr %arrayidx2, align 4
 
   ; B[i-1][j+1]
   %addjone = add i64 %j, 1
   %arrayidx3 = add i64 %addjone, %muliminusone
-  %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %arrayidx3
-  %elem_B2 = load i32, i32* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %B, i64 %arrayidx3
+  %elem_B2 = load i32, ptr %arrayidx4, align 4
 
   ; C[i]
-  %arrayidx6 = getelementptr inbounds i32, i32* %C, i64 %i
-  %elem_C = load i32, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %C, i64 %i
+  %elem_C = load i32, ptr %arrayidx6, align 4
 
   ; A[i][j+1]
   %arrayidx7 = add i64 %addjone, %muli
-  %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %arrayidx7
-  %elem_A = load i32, i32* %arrayidx8, align 4
+  %arrayidx8 = getelementptr inbounds i32, ptr %A, i64 %arrayidx7
+  %elem_A = load i32, ptr %arrayidx8, align 4
 
   ; A[i][j] = A[i][j+1] + B[i-1][j] + B[i-1][j+1] + C[i]
   %addB = add i32 %elem_B1, %elem_B2
   %addC = add i32 %addB, %elem_C
   %addA = add i32 %elem_A, %elem_C
   %arrayidx9 = add i64 %j, %muli
-  %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 %arrayidx9
-  store i32 %addA, i32* %arrayidx10, align 4
+  %arrayidx10 = getelementptr inbounds i32, ptr %A, i64 %arrayidx9
+  store i32 %addA, ptr %arrayidx10, align 4
 
   ; A[i][j] += B[i][i];
   %arrayidx11 = add i64 %j, %muli
-  %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11
-  %elem_A1 = load i32, i32* %arrayidx12, align 4
+  %arrayidx12 = getelementptr inbounds i32, ptr %A, i64 %arrayidx11
+  %elem_A1 = load i32, ptr %arrayidx12, align 4
   %arrayidx13 = add i64 %i, %muli
-  %arrayidx14 = getelementptr inbounds i32, i32* %B, i64 %arrayidx13
-  %elem_B3 = load i32, i32* %arrayidx14, align 4
+  %arrayidx14 = getelementptr inbounds i32, ptr %B, i64 %arrayidx13
+  %elem_B3 = load i32, ptr %arrayidx14, align 4
   %addA1 = add i32 %elem_A1, %elem_B3
-  store i32 %addA1, i32* %arrayidx12, align 4
+  store i32 %addA1, ptr %arrayidx12, align 4
 
   br label %for.incj
 

diff  --git a/llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll b/llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll
index 04f31b3845db0..d979645bef579 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll
@@ -10,9 +10,9 @@
 
 ; SMALLER-CACHELINE: Loop 'for.cond' has cost = 256
 ; LARGER-CACHELINE: Loop 'for.cond' has cost = 32
-%struct._Handleitem = type { %struct._Handleitem* }
+%struct._Handleitem = type { ptr }
 
-define void @handle_to_ptr(%struct._Handleitem** %blocks) {
+define void @handle_to_ptr(ptr %blocks) {
 ; Preheader:
 entry:
   br label %for.cond
@@ -25,8 +25,8 @@ for.cond:                                         ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.cond
   %idxprom = zext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds %struct._Handleitem*, %struct._Handleitem** %blocks, i64 %idxprom
-  store %struct._Handleitem* null, %struct._Handleitem** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %blocks, i64 %idxprom
+  store ptr null, ptr %arrayidx, align 8
   %inc = add nuw nsw i32 %i.0, 1
   br label %for.cond
 
@@ -90,7 +90,7 @@ for.end19:
 
 ; SMALLER-CACHELINE: Loop 'for.neg.cond' has cost = 256
 ; LARGER-CACHELINE: Loop 'for.neg.cond' has cost = 32
-define void @handle_to_ptr_neg_stride(%struct._Handleitem** %blocks) {
+define void @handle_to_ptr_neg_stride(ptr %blocks) {
 ; Preheader:
 entry:
   br label %for.neg.cond
@@ -103,8 +103,8 @@ for.neg.cond:                                         ; preds = %for.neg.body, %
 
 for.neg.body:                                         ; preds = %for.neg.cond
   %idxprom = zext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds %struct._Handleitem*, %struct._Handleitem** %blocks, i64 %idxprom
-  store %struct._Handleitem* null, %struct._Handleitem** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %blocks, i64 %idxprom
+  store ptr null, ptr %arrayidx, align 8
   %dec = add nsw i32 %i.0, -1
   br label %for.neg.cond
 
@@ -124,7 +124,7 @@ for.neg.end:                                          ; preds = %for.neg.cond
 
 ; SMALLER-CACHELINE: Loop 'for.cond2' has cost = 10240
 ; LARGER-CACHELINE: Loop 'for.cond2' has cost = 1280
-define void @Test2(double* %B) {
+define void @Test2(ptr %B) {
 entry:
   br label %for.cond2
 
@@ -136,11 +136,11 @@ for.cond2:                                         ; preds = %for.body, %entry
 for.body:                                         ; preds = %for.cond
   %sub = sub nsw i32 40960, %i.0
   %idxprom = sext i32 %sub to i64
-  %arrayidx = getelementptr inbounds double, double* %B, i64 %idxprom
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %B, i64 %idxprom
+  %0 = load double, ptr %arrayidx, align 8
   %idxprom1 = sext i32 %i.0 to i64
-  %arrayidx2 = getelementptr inbounds double, double* %B, i64 %idxprom1
-  store double %0, double* %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %B, i64 %idxprom1
+  store double %0, ptr %arrayidx2, align 8
   %dec = add nsw i32 %i.0, -1
   br label %for.cond2
 
@@ -155,7 +155,7 @@ for.end:                                          ; preds = %for.cond
 
 ; SMALLER-CACHELINE: Loop 'for.cond3' has cost = 10240
 ; LARGER-CACHELINE: Loop 'for.cond3' has cost = 1280
-define void @Test3(double** %C) {
+define void @Test3(ptr %C) {
 entry:
   br label %for.cond3
 
@@ -166,11 +166,11 @@ for.cond3:                                         ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.cond
   %idxprom = sext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds double*, double** %C, i64 %idxprom
-  %0 = load double*, double** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %C, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom1 = sext i32 %i.0 to i64
-  %arrayidx2 = getelementptr inbounds double*, double** %C, i64 %idxprom1
-  store double* %0, double** %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds ptr, ptr %C, i64 %idxprom1
+  store ptr %0, ptr %arrayidx2, align 8
   %dec = add nsw i32 %i.0, -1
   br label %for.cond3
 
@@ -185,7 +185,7 @@ for.end:                                          ; preds = %for.cond
 
 ; SMALLER-CACHELINE: Loop 'for.cond4' has cost = 10240
 ; LARGER-CACHELINE: Loop 'for.cond4' has cost = 1280
-define void @Test4(double** %D) {
+define void @Test4(ptr %D) {
 entry:
   br label %for.cond4
 
@@ -196,11 +196,11 @@ for.cond4:                                         ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.cond
   %idxprom = sext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds double*, double** %D, i64 %idxprom
-  %0 = load double*, double** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %D, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom1 = sext i32 %i.0 to i64
-  %arrayidx2 = getelementptr inbounds double*, double** %D, i64 %idxprom1
-  store double* %0, double** %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds ptr, ptr %D, i64 %idxprom1
+  store ptr %0, ptr %arrayidx2, align 8
   %inc = add nsw i32 %i.0, 1
   br label %for.cond4
 

diff  --git a/llvm/test/Analysis/LoopInfo/annotated-parallel-complex.ll b/llvm/test/Analysis/LoopInfo/annotated-parallel-complex.ll
index 8b0969ec59e78..9d54a19515272 100644
--- a/llvm/test/Analysis/LoopInfo/annotated-parallel-complex.ll
+++ b/llvm/test/Analysis/LoopInfo/annotated-parallel-complex.ll
@@ -14,7 +14,7 @@
 ;
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
-define void @func(i64 %n, double* noalias nonnull %A, double* noalias nonnull %B) {
+define void @func(i64 %n, ptr noalias nonnull %A, ptr noalias nonnull %B) {
 entry:
   br label %for.cond
 
@@ -42,13 +42,13 @@ for.body13:
   %add = add nuw nsw i64 %i.0, %j.0
   %add14 = add nuw nsw i64 %add, %k.0
   %add15 = add nuw nsw i64 %add14, %l.0
-  %arrayidx = getelementptr inbounds double, double* %A, i64 %add15
-  store double 2.100000e+01, double* %arrayidx, align 8, !llvm.access.group !5
+  %arrayidx = getelementptr inbounds double, ptr %A, i64 %add15
+  store double 2.100000e+01, ptr %arrayidx, align 8, !llvm.access.group !5
   %add16 = add nuw nsw i64 %i.0, %j.0
   %add17 = add nuw nsw i64 %add16, %k.0
   %add18 = add nuw nsw i64 %add17, %l.0
-  %arrayidx19 = getelementptr inbounds double, double* %B, i64 %add18
-  store double 4.200000e+01, double* %arrayidx19, align 8, !llvm.access.group !6
+  %arrayidx19 = getelementptr inbounds double, ptr %B, i64 %add18
+  store double 4.200000e+01, ptr %arrayidx19, align 8, !llvm.access.group !6
   %add20 = add nuw nsw i64 %l.0, 1
   br label %for.cond10, !llvm.loop !11
 

diff  --git a/llvm/test/Analysis/LoopInfo/annotated-parallel-simple.ll b/llvm/test/Analysis/LoopInfo/annotated-parallel-simple.ll
index 24ed59c49ccc2..7307c2118f725 100644
--- a/llvm/test/Analysis/LoopInfo/annotated-parallel-simple.ll
+++ b/llvm/test/Analysis/LoopInfo/annotated-parallel-simple.ll
@@ -9,7 +9,7 @@
 ;
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
-define void @func(i64 %n, double* noalias nonnull %A) {
+define void @func(i64 %n, ptr noalias nonnull %A) {
 entry:
   br label %for.cond
 
@@ -19,8 +19,8 @@ for.cond:
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:
-  %arrayidx = getelementptr inbounds double, double* %A, i64 %i.0
-  store double 2.100000e+01, double* %arrayidx, align 8, !llvm.access.group !6
+  %arrayidx = getelementptr inbounds double, ptr %A, i64 %i.0
+  store double 2.100000e+01, ptr %arrayidx, align 8, !llvm.access.group !6
   %add = add nuw nsw i64 %i.0, 1
   br label %for.cond, !llvm.loop !7
 

diff  --git a/llvm/test/Analysis/LoopNestAnalysis/duplicate-successors.ll b/llvm/test/Analysis/LoopNestAnalysis/duplicate-successors.ll
index 2b9b897081016..859d4c7667e42 100644
--- a/llvm/test/Analysis/LoopNestAnalysis/duplicate-successors.ll
+++ b/llvm/test/Analysis/LoopNestAnalysis/duplicate-successors.ll
@@ -23,10 +23,10 @@ inner.header.preheader:                           ; preds = %outer.header, %oute
 
 inner.header:                                     ; preds = %inner.header.preheader, %inner.header
   %inner.iv = phi i64 [ %inner.iv.next, %inner.header ], [ 5, %inner.header.preheader ]
-  %ptr = getelementptr inbounds [1000 x [1000 x i32]], [1000 x [1000 x i32]]* @global, i64 0, i64 %inner.iv, i64 %outer.iv
-  %lv = load i32, i32* %ptr, align 4
+  %ptr = getelementptr inbounds [1000 x [1000 x i32]], ptr @global, i64 0, i64 %inner.iv, i64 %outer.iv
+  %lv = load i32, ptr %ptr, align 4
   %v = mul i32 %lv, 100
-  store i32 %v, i32* %ptr, align 4
+  store i32 %v, ptr %ptr, align 4
   %inner.iv.next = add nsw i64 %inner.iv, 1
   %cond1 = icmp eq i64 %inner.iv.next, 1000
   br i1 %cond1, label %outer.latch, label %inner.header

diff  --git a/llvm/test/Analysis/LoopNestAnalysis/imperfectnest.ll b/llvm/test/Analysis/LoopNestAnalysis/imperfectnest.ll
index 77b361bc6baef..93db74af827e5 100644
--- a/llvm/test/Analysis/LoopNestAnalysis/imperfectnest.ll
+++ b/llvm/test/Analysis/LoopNestAnalysis/imperfectnest.ll
@@ -26,8 +26,8 @@ imperf_nest_1_loop_i:
 for.body:
   %conv = sitofp i32 %i2.0 to double
   %idxprom = sext i32 %i2.0 to i64
-  %arrayidx = getelementptr inbounds double, double* %vla1, i64 %idxprom
-  store double %conv, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %vla1, i64 %idxprom
+  store double %conv, ptr %arrayidx, align 8
   br label %imperf_nest_1_loop_j
 
 imperf_nest_1_loop_j:
@@ -37,16 +37,16 @@ imperf_nest_1_loop_j:
 
 for.body7:
   %idxprom8 = sext i32 %i2.0 to i64
-  %arrayidx9 = getelementptr inbounds double, double* %vla1, i64 %idxprom8
-  %4 = load double, double* %arrayidx9, align 8
+  %arrayidx9 = getelementptr inbounds double, ptr %vla1, i64 %idxprom8
+  %4 = load double, ptr %arrayidx9, align 8
   %conv10 = sitofp i32 %j3.0 to double
   %add = fadd double %4, %conv10
   %idxprom11 = sext i32 %j3.0 to i64
   %5 = mul nsw i64 %idxprom11, %1
-  %arrayidx12 = getelementptr inbounds double, double* %vla, i64 %5
+  %arrayidx12 = getelementptr inbounds double, ptr %vla, i64 %5
   %idxprom13 = sext i32 %i2.0 to i64
-  %arrayidx14 = getelementptr inbounds double, double* %arrayidx12, i64 %idxprom13
-  store double %add, double* %arrayidx14, align 8
+  %arrayidx14 = getelementptr inbounds double, ptr %arrayidx12, i64 %idxprom13
+  store double %add, ptr %arrayidx14, align 8
   br label %for.inc
 
 for.inc:
@@ -97,16 +97,16 @@ imperf_nest_2_loop_j:
 
 for.body6:
   %idxprom = sext i32 %i2.0 to i64
-  %arrayidx = getelementptr inbounds double, double* %vla1, i64 %idxprom
-  %4 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %vla1, i64 %idxprom
+  %4 = load double, ptr %arrayidx, align 8
   %conv = sitofp i32 %j3.0 to double
   %add = fadd double %4, %conv
   %idxprom7 = sext i32 %j3.0 to i64
   %5 = mul nsw i64 %idxprom7, %1
-  %arrayidx8 = getelementptr inbounds double, double* %vla, i64 %5
+  %arrayidx8 = getelementptr inbounds double, ptr %vla, i64 %5
   %idxprom9 = sext i32 %i2.0 to i64
-  %arrayidx10 = getelementptr inbounds double, double* %arrayidx8, i64 %idxprom9
-  store double %add, double* %arrayidx10, align 8
+  %arrayidx10 = getelementptr inbounds double, ptr %arrayidx8, i64 %idxprom9
+  store double %add, ptr %arrayidx10, align 8
   br label %for.inc
 
 for.inc:
@@ -116,12 +116,12 @@ for.inc:
 for.end:
   %conv11 = sitofp i32 %i2.0 to double
   %6 = mul nsw i64 0, %1
-  %arrayidx12 = getelementptr inbounds double, double* %vla, i64 %6
+  %arrayidx12 = getelementptr inbounds double, ptr %vla, i64 %6
   %idxprom13 = sext i32 %i2.0 to i64
-  %arrayidx14 = getelementptr inbounds double, double* %arrayidx12, i64 %idxprom13
-  %7 = load double, double* %arrayidx14, align 8
+  %arrayidx14 = getelementptr inbounds double, ptr %arrayidx12, i64 %idxprom13
+  %7 = load double, ptr %arrayidx14, align 8
   %add15 = fadd double %7, %conv11
-  store double %add15, double* %arrayidx14, align 8
+  store double %add15, ptr %arrayidx14, align 8
   br label %for.inc16
 
 for.inc16:
@@ -167,16 +167,16 @@ imperf_nest_3_loop_j:                                        ; preds = %for.inc,
 
 for.body4:                                        ; preds = %imperf_nest_3_loop_j
   %idxprom = sext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds double, double* %vla1, i64 %idxprom
-  %4 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %vla1, i64 %idxprom
+  %4 = load double, ptr %arrayidx, align 8
   %conv = sitofp i32 %j.0 to double
   %add = fadd double %4, %conv
   %idxprom5 = sext i32 %i.0 to i64
   %5 = mul nsw i64 %idxprom5, %1
-  %arrayidx6 = getelementptr inbounds double, double* %vla, i64 %5
+  %arrayidx6 = getelementptr inbounds double, ptr %vla, i64 %5
   %idxprom7 = sext i32 %j.0 to i64
-  %arrayidx8 = getelementptr inbounds double, double* %arrayidx6, i64 %idxprom7
-  store double %add, double* %arrayidx8, align 8
+  %arrayidx8 = getelementptr inbounds double, ptr %arrayidx6, i64 %idxprom7
+  store double %add, ptr %arrayidx8, align 8
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body4
@@ -194,16 +194,16 @@ imperf_nest_3_loop_k:                                       ; preds = %for.inc22
 
 for.body13:                                       ; preds = %imperf_nest_3_loop_k
   %idxprom14 = sext i32 %i.0 to i64
-  %arrayidx15 = getelementptr inbounds double, double* %vla1, i64 %idxprom14
-  %6 = load double, double* %arrayidx15, align 8
+  %arrayidx15 = getelementptr inbounds double, ptr %vla1, i64 %idxprom14
+  %6 = load double, ptr %arrayidx15, align 8
   %conv16 = sitofp i32 %j.1 to double
   %sub17 = fsub double %6, %conv16
   %idxprom18 = sext i32 %i.0 to i64
   %7 = mul nsw i64 %idxprom18, %1
-  %arrayidx19 = getelementptr inbounds double, double* %vla, i64 %7
+  %arrayidx19 = getelementptr inbounds double, ptr %vla, i64 %7
   %idxprom20 = sext i32 %j.1 to i64
-  %arrayidx21 = getelementptr inbounds double, double* %arrayidx19, i64 %idxprom20
-  store double %sub17, double* %arrayidx21, align 8
+  %arrayidx21 = getelementptr inbounds double, ptr %arrayidx19, i64 %idxprom20
+  store double %sub17, ptr %arrayidx21, align 8
   br label %for.inc22
 
 for.inc22:                                        ; preds = %for.body13
@@ -269,20 +269,20 @@ imperf_nest_4_loop_k:
   %k.0 = phi i32 [ 0, %imperf_nest_4_loop_k.lr.ph ], [ %inc, %for.inc ]
   %add = add nsw i32 %i.0, %j.0
   %idxprom = sext i32 %add to i64
-  %arrayidx = getelementptr inbounds double, double* %vla1, i64 %idxprom
-  %6 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %vla1, i64 %idxprom
+  %6 = load double, ptr %arrayidx, align 8
   %conv = sitofp i32 %k.0 to double
   %add8 = fadd double %6, %conv
   %idxprom9 = sext i32 %i.0 to i64
   %7 = mul nuw i64 %1, %2
   %8 = mul nsw i64 %idxprom9, %7
-  %arrayidx10 = getelementptr inbounds double, double* %vla, i64 %8
+  %arrayidx10 = getelementptr inbounds double, ptr %vla, i64 %8
   %idxprom11 = sext i32 %j.0 to i64
   %9 = mul nsw i64 %idxprom11, %2
-  %arrayidx12 = getelementptr inbounds double, double* %arrayidx10, i64 %9
+  %arrayidx12 = getelementptr inbounds double, ptr %arrayidx10, i64 %9
   %idxprom13 = sext i32 %k.0 to i64
-  %arrayidx14 = getelementptr inbounds double, double* %arrayidx12, i64 %idxprom13
-  store double %add8, double* %arrayidx14, align 8
+  %arrayidx14 = getelementptr inbounds double, ptr %arrayidx12, i64 %idxprom13
+  store double %add8, ptr %arrayidx14, align 8
   br label %for.inc
 
 for.inc:
@@ -316,8 +316,8 @@ imperf_nest_4_loop_j2.lr.ph:
 imperf_nest_4_loop_j2:
   %j.1 = phi i32 [ %sub18, %imperf_nest_4_loop_j2.lr.ph ], [ %inc33, %for.inc32 ]
   %idxprom23 = sext i32 %i.0 to i64
-  %arrayidx24 = getelementptr inbounds double, double* %vla1, i64 %idxprom23
-  %10 = load double, double* %arrayidx24, align 8
+  %arrayidx24 = getelementptr inbounds double, ptr %vla1, i64 %idxprom23
+  %10 = load double, ptr %arrayidx24, align 8
   %conv25 = sitofp i32 %j.1 to double
   %sub26 = fsub double %10, %conv25
   %idxprom27 = sext i32 %i.0 to i64
@@ -325,10 +325,10 @@ imperf_nest_4_loop_j2:
   %11 = mul nsw i64 %idxprom29, %2
   %12 = mul nuw i64 %1, %2
   %13 = mul nsw i64 %idxprom27, %12
-  %arrayidx28 = getelementptr inbounds double, double* %vla, i64 %13
-  %arrayidx30 = getelementptr inbounds double, double* %arrayidx28, i64 %11
-  %arrayidx31 = getelementptr inbounds double, double* %arrayidx30, i64 0
-  store double %sub26, double* %arrayidx31, align 8
+  %arrayidx28 = getelementptr inbounds double, ptr %vla, i64 %13
+  %arrayidx30 = getelementptr inbounds double, ptr %arrayidx28, i64 %11
+  %arrayidx31 = getelementptr inbounds double, ptr %arrayidx30, i64 0
+  store double %sub26, ptr %arrayidx31, align 8
   br label %for.inc32
 
 for.inc32:
@@ -361,7 +361,7 @@ for.end37:
 ;         y[j][i] = x[i][j] + j;
 ;     }
 
-define void @imperf_nest_5(i32** %y, i32** %x, i32 signext %nx, i32 signext %ny) {
+define void @imperf_nest_5(ptr %y, ptr %x, i32 signext %nx, i32 signext %ny) {
 ; CHECK-LABEL: IsPerfect=false, Depth=2, OutermostLoop: imperf_nest_5_loop_i, Loops: ( imperf_nest_5_loop_i imperf_nest_5_loop_j )
 entry:
   %cmp2 = icmp slt i32 0, %nx
@@ -385,18 +385,18 @@ imperf_nest_5_loop_j.lr.ph:
 imperf_nest_5_loop_j:      
   %j.0 = phi i32 [ 0, %imperf_nest_5_loop_j.lr.ph ], [ %inc, %for.inc ]
   %idxprom = sext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds i32*, i32** %x, i64 %idxprom
-  %0 = load i32*, i32** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %x, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom5 = sext i32 %j.0 to i64
-  %arrayidx6 = getelementptr inbounds i32, i32* %0, i64 %idxprom5
-  %1 = load i32, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %0, i64 %idxprom5
+  %1 = load i32, ptr %arrayidx6, align 4
   %add = add nsw i32 %1, %j.0
   %idxprom7 = sext i32 %j.0 to i64
-  %arrayidx8 = getelementptr inbounds i32*, i32** %y, i64 %idxprom7
-  %2 = load i32*, i32** %arrayidx8, align 8
+  %arrayidx8 = getelementptr inbounds ptr, ptr %y, i64 %idxprom7
+  %2 = load ptr, ptr %arrayidx8, align 8
   %idxprom9 = sext i32 %i.0 to i64
-  %arrayidx10 = getelementptr inbounds i32, i32* %2, i64 %idxprom9
-  store i32 %add, i32* %arrayidx10, align 4
+  %arrayidx10 = getelementptr inbounds i32, ptr %2, i64 %idxprom9
+  store i32 %add, ptr %arrayidx10, align 4
   br label %for.inc
 
 for.inc:

diff  --git a/llvm/test/Analysis/LoopNestAnalysis/infinite.ll b/llvm/test/Analysis/LoopNestAnalysis/infinite.ll
index 7a6cf21584fff..7013febdd3801 100644
--- a/llvm/test/Analysis/LoopNestAnalysis/infinite.ll
+++ b/llvm/test/Analysis/LoopNestAnalysis/infinite.ll
@@ -1,7 +1,7 @@
 ; RUN: opt < %s -passes='print<loopnest>' -disable-output 2>&1 | FileCheck %s
 
 ; Test that the loop nest analysis is able to analyze an infinite loop in a loop nest.
-define void @test1(i32** %A, i1 %cond) {
+define void @test1(ptr %A, i1 %cond) {
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: for.inner, Loops: ( for.inner )
 ; CHECK-LABEL: IsPerfect=false, Depth=2, OutermostLoop: for.outer, Loops: ( for.outer for.inner )
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: for.infinite, Loops: ( for.infinite )
@@ -14,10 +14,10 @@ for.outer:
 
 for.inner:
   %j = phi i64 [ 0, %for.outer ], [ %inc_j, %for.inner ]
-  %arrayidx_i = getelementptr inbounds i32*, i32** %A, i64 %i
-  %0 = load i32*, i32** %arrayidx_i, align 8
-  %arrayidx_j = getelementptr inbounds i32, i32* %0, i64 %j
-  store i32 0, i32* %arrayidx_j, align 4
+  %arrayidx_i = getelementptr inbounds ptr, ptr %A, i64 %i
+  %0 = load ptr, ptr %arrayidx_i, align 8
+  %arrayidx_j = getelementptr inbounds i32, ptr %0, i64 %j
+  store i32 0, ptr %arrayidx_j, align 4
   %inc_j = add nsw i64 %j, 1
   %cmp_j = icmp slt i64 %inc_j, 100
   br i1 %cmp_j, label %for.inner, label %for.outer.latch

diff  --git a/llvm/test/Analysis/LoopNestAnalysis/perfectnest.ll b/llvm/test/Analysis/LoopNestAnalysis/perfectnest.ll
index f8b0e6ad2c884..b2e33223a8167 100644
--- a/llvm/test/Analysis/LoopNestAnalysis/perfectnest.ll
+++ b/llvm/test/Analysis/LoopNestAnalysis/perfectnest.ll
@@ -5,7 +5,7 @@
 ;     for(j=0; j<nx; ++j)
 ;       y[i][j] = x[i][j];
 
-define void @perf_nest_2D_1(i32** %y, i32** %x, i64 signext %nx, i64 signext %ny) {
+define void @perf_nest_2D_1(ptr %y, ptr %x, i64 signext %nx, i64 signext %ny) {
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: perf_nest_2D_1_loop_j, Loops: ( perf_nest_2D_1_loop_j )
 ; CHECK-LABEL: IsPerfect=true, Depth=2, OutermostLoop: perf_nest_2D_1_loop_i, Loops: ( perf_nest_2D_1_loop_i perf_nest_2D_1_loop_j )
 entry:
@@ -18,14 +18,14 @@ perf_nest_2D_1_loop_i:
 
 perf_nest_2D_1_loop_j:
   %j = phi i64 [ 0, %perf_nest_2D_1_loop_i ], [ %inc, %inc_j ]
-  %arrayidx = getelementptr inbounds i32*, i32** %x, i64 %j
-  %0 = load i32*, i32** %arrayidx, align 8
-  %arrayidx6 = getelementptr inbounds i32, i32* %0, i64 %j
-  %1 = load i32, i32* %arrayidx6, align 4
-  %arrayidx8 = getelementptr inbounds i32*, i32** %y, i64 %j
-  %2 = load i32*, i32** %arrayidx8, align 8
-  %arrayidx11 = getelementptr inbounds i32, i32* %2, i64 %i
-  store i32 %1, i32* %arrayidx11, align 4
+  %arrayidx = getelementptr inbounds ptr, ptr %x, i64 %j
+  %0 = load ptr, ptr %arrayidx, align 8
+  %arrayidx6 = getelementptr inbounds i32, ptr %0, i64 %j
+  %1 = load i32, ptr %arrayidx6, align 4
+  %arrayidx8 = getelementptr inbounds ptr, ptr %y, i64 %j
+  %2 = load ptr, ptr %arrayidx8, align 8
+  %arrayidx11 = getelementptr inbounds i32, ptr %2, i64 %i
+  store i32 %1, ptr %arrayidx11, align 4
   br label %inc_j
 
 inc_j:
@@ -46,7 +46,7 @@ perf_nest_2D_1_loop_i_end:
 ;   for (i=0; i<100; ++i)
 ;     for (j=0; j<100; ++j)
 ;       y[i][j] = x[i][j];
-define void @perf_nest_2D_2(i32** %y, i32** %x) {
+define void @perf_nest_2D_2(ptr %y, ptr %x) {
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: perf_nest_2D_2_loop_j, Loops: ( perf_nest_2D_2_loop_j )
 ; CHECK-LABEL: IsPerfect=true, Depth=2, OutermostLoop: perf_nest_2D_2_loop_i, Loops: ( perf_nest_2D_2_loop_i perf_nest_2D_2_loop_j )
 entry:
@@ -58,14 +58,14 @@ perf_nest_2D_2_loop_i:
 
 perf_nest_2D_2_loop_j:
   %j = phi i64 [ 0, %perf_nest_2D_2_loop_i ], [ %inc, %inc_j ]
-  %arrayidx = getelementptr inbounds i32*, i32** %x, i64 %j
-  %0 = load i32*, i32** %arrayidx, align 8
-  %arrayidx6 = getelementptr inbounds i32, i32* %0, i64 %j
-  %1 = load i32, i32* %arrayidx6, align 4
-  %arrayidx8 = getelementptr inbounds i32*, i32** %y, i64 %j
-  %2 = load i32*, i32** %arrayidx8, align 8
-  %arrayidx11 = getelementptr inbounds i32, i32* %2, i64 %i
-  store i32 %1, i32* %arrayidx11, align 4
+  %arrayidx = getelementptr inbounds ptr, ptr %x, i64 %j
+  %0 = load ptr, ptr %arrayidx, align 8
+  %arrayidx6 = getelementptr inbounds i32, ptr %0, i64 %j
+  %1 = load i32, ptr %arrayidx6, align 4
+  %arrayidx8 = getelementptr inbounds ptr, ptr %y, i64 %j
+  %2 = load ptr, ptr %arrayidx8, align 8
+  %arrayidx11 = getelementptr inbounds i32, ptr %2, i64 %i
+  store i32 %1, ptr %arrayidx11, align 4
   br label %inc_j
 
 inc_j:
@@ -85,7 +85,7 @@ perf_nest_2D_2_loop_i_end:
   ret void
 }
 
-define void @perf_nest_2D_3(i32** %y, i32** %x, i64 signext %nx, i64 signext %ny) {
+define void @perf_nest_2D_3(ptr %y, ptr %x, i64 signext %nx, i64 signext %ny) {
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: perf_nest_2D_3_loop_j, Loops: ( perf_nest_2D_3_loop_j )
 ; CHECK-LABEL: IsPerfect=true, Depth=2, OutermostLoop: perf_nest_2D_3_loop_i, Loops: ( perf_nest_2D_3_loop_i perf_nest_2D_3_loop_j )
 entry:
@@ -104,14 +104,14 @@ preheader.j:
 
 perf_nest_2D_3_loop_j:
   %j = phi i64 [ 0, %preheader.j ], [ %inc, %inc_j ]
-  %arrayidx = getelementptr inbounds i32*, i32** %x, i64 %j
-  %0 = load i32*, i32** %arrayidx, align 8
-  %arrayidx6 = getelementptr inbounds i32, i32* %0, i64 %j
-  %1 = load i32, i32* %arrayidx6, align 4
-  %arrayidx8 = getelementptr inbounds i32*, i32** %y, i64 %j
-  %2 = load i32*, i32** %arrayidx8, align 8
-  %arrayidx11 = getelementptr inbounds i32, i32* %2, i64 %i
-  store i32 %1, i32* %arrayidx11, align 4
+  %arrayidx = getelementptr inbounds ptr, ptr %x, i64 %j
+  %0 = load ptr, ptr %arrayidx, align 8
+  %arrayidx6 = getelementptr inbounds i32, ptr %0, i64 %j
+  %1 = load i32, ptr %arrayidx6, align 4
+  %arrayidx8 = getelementptr inbounds ptr, ptr %y, i64 %j
+  %2 = load ptr, ptr %arrayidx8, align 8
+  %arrayidx11 = getelementptr inbounds i32, ptr %2, i64 %i
+  store i32 %1, ptr %arrayidx11, align 4
   br label %inc_j
 
 inc_j:
@@ -141,7 +141,7 @@ perf_nest_2D_3_loop_i_end:
 ;          y[j][j][k] = x[i][j][k];
 ;
 
-define void @perf_nest_3D_1(i32*** %y, i32*** %x, i32 signext %nx, i32 signext %ny, i32 signext %nk) {
+define void @perf_nest_3D_1(ptr %y, ptr %x, i32 signext %nx, i32 signext %ny, i32 signext %nk) {
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: perf_nest_3D_1_loop_k, Loops: ( perf_nest_3D_1_loop_k )
 ; CHECK-NEXT: IsPerfect=true, Depth=2, OutermostLoop: perf_nest_3D_1_loop_j, Loops: ( perf_nest_3D_1_loop_j perf_nest_3D_1_loop_k )
 ; CHECK-NEXT: IsPerfect=true, Depth=3, OutermostLoop: perf_nest_3D_1_loop_i, Loops: ( perf_nest_3D_1_loop_i perf_nest_3D_1_loop_j perf_nest_3D_1_loop_k )
@@ -161,23 +161,23 @@ perf_nest_3D_1_loop_j:
 perf_nest_3D_1_loop_k:
   %k = phi i32 [ 0, %perf_nest_3D_1_loop_j ], [ %inck, %for.inck ]
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i32**, i32*** %x, i64 %idxprom
-  %0 = load i32**, i32*** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %x, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom7 = sext i32 %j to i64
-  %arrayidx8 = getelementptr inbounds i32*, i32** %0, i64 %idxprom7
-  %1 = load i32*, i32** %arrayidx8, align 8
+  %arrayidx8 = getelementptr inbounds ptr, ptr %0, i64 %idxprom7
+  %1 = load ptr, ptr %arrayidx8, align 8
   %idxprom9 = sext i32 %k to i64
-  %arrayidx10 = getelementptr inbounds i32, i32* %1, i64 %idxprom9
-  %2 = load i32, i32* %arrayidx10, align 4
+  %arrayidx10 = getelementptr inbounds i32, ptr %1, i64 %idxprom9
+  %2 = load i32, ptr %arrayidx10, align 4
   %idxprom11 = sext i32 %j to i64
-  %arrayidx12 = getelementptr inbounds i32**, i32*** %y, i64 %idxprom11
-  %3 = load i32**, i32*** %arrayidx12, align 8
+  %arrayidx12 = getelementptr inbounds ptr, ptr %y, i64 %idxprom11
+  %3 = load ptr, ptr %arrayidx12, align 8
   %idxprom13 = sext i32 %j to i64
-  %arrayidx14 = getelementptr inbounds i32*, i32** %3, i64 %idxprom13
-  %4 = load i32*, i32** %arrayidx14, align 8
+  %arrayidx14 = getelementptr inbounds ptr, ptr %3, i64 %idxprom13
+  %4 = load ptr, ptr %arrayidx14, align 8
   %idxprom15 = sext i32 %k to i64
-  %arrayidx16 = getelementptr inbounds i32, i32* %4, i64 %idxprom15
-  store i32 %2, i32* %arrayidx16, align 4
+  %arrayidx16 = getelementptr inbounds i32, ptr %4, i64 %idxprom15
+  store i32 %2, ptr %arrayidx16, align 4
   br label %for.inck
 
 for.inck:
@@ -206,7 +206,7 @@ perf_nest_3D_1_loop_i_end:
 ;          y[j][j][k] = x[i][j][k];
 ;
 
-define void @perf_nest_3D_2(i32*** %y, i32*** %x) {
+define void @perf_nest_3D_2(ptr %y, ptr %x) {
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: perf_nest_3D_2_loop_k, Loops: ( perf_nest_3D_2_loop_k )
 ; CHECK-NEXT: IsPerfect=true, Depth=2, OutermostLoop: perf_nest_3D_2_loop_j, Loops: ( perf_nest_3D_2_loop_j perf_nest_3D_2_loop_k )
 ; CHECK-NEXT: IsPerfect=true, Depth=3, OutermostLoop: perf_nest_3D_2_loop_i, Loops: ( perf_nest_3D_2_loop_i perf_nest_3D_2_loop_j perf_nest_3D_2_loop_k )
@@ -224,23 +224,23 @@ perf_nest_3D_2_loop_j:
 perf_nest_3D_2_loop_k:
   %k = phi i32 [ 0, %perf_nest_3D_2_loop_j ], [ %inck, %for.inck ]
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i32**, i32*** %x, i64 %idxprom
-  %0 = load i32**, i32*** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %x, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom7 = sext i32 %j to i64
-  %arrayidx8 = getelementptr inbounds i32*, i32** %0, i64 %idxprom7
-  %1 = load i32*, i32** %arrayidx8, align 8
+  %arrayidx8 = getelementptr inbounds ptr, ptr %0, i64 %idxprom7
+  %1 = load ptr, ptr %arrayidx8, align 8
   %idxprom9 = sext i32 %k to i64
-  %arrayidx10 = getelementptr inbounds i32, i32* %1, i64 %idxprom9
-  %2 = load i32, i32* %arrayidx10, align 4
+  %arrayidx10 = getelementptr inbounds i32, ptr %1, i64 %idxprom9
+  %2 = load i32, ptr %arrayidx10, align 4
   %idxprom11 = sext i32 %j to i64
-  %arrayidx12 = getelementptr inbounds i32**, i32*** %y, i64 %idxprom11
-  %3 = load i32**, i32*** %arrayidx12, align 8
+  %arrayidx12 = getelementptr inbounds ptr, ptr %y, i64 %idxprom11
+  %3 = load ptr, ptr %arrayidx12, align 8
   %idxprom13 = sext i32 %j to i64
-  %arrayidx14 = getelementptr inbounds i32*, i32** %3, i64 %idxprom13
-  %4 = load i32*, i32** %arrayidx14, align 8
+  %arrayidx14 = getelementptr inbounds ptr, ptr %3, i64 %idxprom13
+  %4 = load ptr, ptr %arrayidx14, align 8
   %idxprom15 = sext i32 %k to i64
-  %arrayidx16 = getelementptr inbounds i32, i32* %4, i64 %idxprom15
-  store i32 %2, i32* %arrayidx16, align 4
+  %arrayidx16 = getelementptr inbounds i32, ptr %4, i64 %idxprom15
+  store i32 %2, ptr %arrayidx16, align 4
   br label %for.inck
 
 for.inck:
@@ -329,7 +329,7 @@ for.end7:
 ;       for (int j=i; j < ny; j+=1)
 ;         y[j][i] = x[i][j] + j;
 ;     }
-define double @perf_nest_guard_branch(i32** %y, i32** %x, i32 signext %nx, i32 signext %ny) {
+define double @perf_nest_guard_branch(ptr %y, ptr %x, i32 signext %nx, i32 signext %ny) {
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: test6Loop2, Loops: ( test6Loop2 )
 ; CHECK-LABEL: IsPerfect=true, Depth=2, OutermostLoop: test6Loop1, Loops: ( test6Loop1 test6Loop2 )
 entry:
@@ -350,18 +350,18 @@ test6Loop2.lr.ph:                                  ; preds = %if.then
 test6Loop2:                                        ; preds = %test6Loop2.lr.ph, %for.inc
   %j.0 = phi i32 [ %i.0, %test6Loop2.lr.ph ], [ %inc, %for.inc ]
   %idxprom = sext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds i32*, i32** %x, i64 %idxprom
-  %0 = load i32*, i32** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %x, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom5 = sext i32 %j.0 to i64
-  %arrayidx6 = getelementptr inbounds i32, i32* %0, i64 %idxprom5
-  %1 = load i32, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %0, i64 %idxprom5
+  %1 = load i32, ptr %arrayidx6, align 4
   %add = add nsw i32 %1, %j.0
   %idxprom7 = sext i32 %j.0 to i64
-  %arrayidx8 = getelementptr inbounds i32*, i32** %y, i64 %idxprom7
-  %2 = load i32*, i32** %arrayidx8, align 8
+  %arrayidx8 = getelementptr inbounds ptr, ptr %y, i64 %idxprom7
+  %2 = load ptr, ptr %arrayidx8, align 8
   %idxprom9 = sext i32 %i.0 to i64
-  %arrayidx10 = getelementptr inbounds i32, i32* %2, i64 %idxprom9
-  store i32 %add, i32* %arrayidx10, align 4
+  %arrayidx10 = getelementptr inbounds i32, ptr %2, i64 %idxprom9
+  store i32 %add, ptr %arrayidx10, align 4
   br label %for.inc
 
 for.inc:                                          ; preds = %test6Loop2
@@ -387,10 +387,10 @@ for.cond.for.end13_crit_edge:                     ; preds = %for.inc11
   br label %for.end13
 
 for.end13:                                        ; preds = %for.cond.for.end13_crit_edge, %entry
-  %arrayidx14 = getelementptr inbounds i32*, i32** %y, i64 0
-  %3 = load i32*, i32** %arrayidx14, align 8
-  %arrayidx15 = getelementptr inbounds i32, i32* %3, i64 0
-  %4 = load i32, i32* %arrayidx15, align 4
+  %arrayidx14 = getelementptr inbounds ptr, ptr %y, i64 0
+  %3 = load ptr, ptr %arrayidx14, align 8
+  %arrayidx15 = getelementptr inbounds i32, ptr %3, i64 0
+  %4 = load i32, ptr %arrayidx15, align 4
   %conv = sitofp i32 %4 to double
   ret double %conv
 }
@@ -402,7 +402,7 @@ for.end13:                                        ; preds = %for.cond.for.end13_
 ;         y[j][i] = x[i][j] + j;
 ;     }
 
-define double @test6(i32** %y, i32** %x, i32 signext %nx, i32 signext %ny) {
+define double @test6(ptr %y, ptr %x, i32 signext %nx, i32 signext %ny) {
 ; CHECK-LABEL: IsPerfect=true, Depth=1, OutermostLoop: test6Loop2, Loops: ( test6Loop2 )
 ; CHECK-LABEL: IsPerfect=true, Depth=2, OutermostLoop: test6Loop1, Loops: ( test6Loop1 test6Loop2 )
 entry:
@@ -423,18 +423,18 @@ test6Loop2.lr.ph:                                  ; preds = %if.then
 test6Loop2:                                        ; preds = %test6Loop2.lr.ph, %for.inc
   %j.0 = phi i32 [ %i.0, %test6Loop2.lr.ph ], [ %inc, %for.inc ]
   %idxprom = sext i32 %i.0 to i64
-  %arrayidx = getelementptr inbounds i32*, i32** %x, i64 %idxprom
-  %0 = load i32*, i32** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr %x, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
   %idxprom5 = sext i32 %j.0 to i64
-  %arrayidx6 = getelementptr inbounds i32, i32* %0, i64 %idxprom5
-  %1 = load i32, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %0, i64 %idxprom5
+  %1 = load i32, ptr %arrayidx6, align 4
   %add = add nsw i32 %1, %j.0
   %idxprom7 = sext i32 %j.0 to i64
-  %arrayidx8 = getelementptr inbounds i32*, i32** %y, i64 %idxprom7
-  %2 = load i32*, i32** %arrayidx8, align 8
+  %arrayidx8 = getelementptr inbounds ptr, ptr %y, i64 %idxprom7
+  %2 = load ptr, ptr %arrayidx8, align 8
   %idxprom9 = sext i32 %i.0 to i64
-  %arrayidx10 = getelementptr inbounds i32, i32* %2, i64 %idxprom9
-  store i32 %add, i32* %arrayidx10, align 4
+  %arrayidx10 = getelementptr inbounds i32, ptr %2, i64 %idxprom9
+  store i32 %add, ptr %arrayidx10, align 4
   br label %for.inc
 
 for.inc:                                          ; preds = %test6Loop2
@@ -460,10 +460,10 @@ for.cond.for.end13_crit_edge:                     ; preds = %for.inc11
   br label %for.end13
 
 for.end13:                                        ; preds = %for.cond.for.end13_crit_edge, %entry
-  %arrayidx14 = getelementptr inbounds i32*, i32** %y, i64 0
-  %3 = load i32*, i32** %arrayidx14, align 8
-  %arrayidx15 = getelementptr inbounds i32, i32* %3, i64 0
-  %4 = load i32, i32* %arrayidx15, align 4
+  %arrayidx14 = getelementptr inbounds ptr, ptr %y, i64 0
+  %3 = load ptr, ptr %arrayidx14, align 8
+  %arrayidx15 = getelementptr inbounds i32, ptr %3, i64 0
+  %4 = load i32, ptr %arrayidx15, align 4
   %conv = sitofp i32 %4 to double
   ret double %conv
 }

diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/always-uniform-gmir.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/always-uniform-gmir.mir
index 288c809e8fa05..c4dd7adcf95af 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/always-uniform-gmir.mir
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/always-uniform-gmir.mir
@@ -11,7 +11,7 @@ body:             |
     %6:_(p1) = G_IMPLICIT_DEF
     %4:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.workitem.id.x)
     %5:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), %4(s32)
-    G_STORE %5(s32), %6(p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    G_STORE %5(s32), %6(p1) :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     S_ENDPGM 0
 ...
 ---
@@ -53,7 +53,7 @@ body:             |
     %13:_(s64) = G_CONSTANT i64 4
     %14:_(p4) = G_PTR_ADD %7, %13(s64)
     %15:_(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.fcmp), %9(s32), %11(s32), 33
-    G_STORE %15(s64), %16(p1) :: (volatile store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
+    G_STORE %15(s64), %16(p1) :: (volatile store (s64) into `ptr addrspace(1) undef`, addrspace 1)
     S_ENDPGM 0
 
 ...
@@ -71,7 +71,7 @@ body:             |
     %7:_(s32) = G_LOAD %6(p4) :: (dereferenceable invariant load (s32), align 16, addrspace 4)
     %8:_(s1) = G_TRUNC %7(s32)
     %9:_(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.ballot), %8(s1)
-    G_STORE %9(s64), %10(p1) :: (volatile store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
+    G_STORE %9(s64), %10(p1) :: (volatile store (s64) into `ptr addrspace(1) undef`, addrspace 1)
     S_ENDPGM 0
 
 ...
@@ -125,7 +125,7 @@ body:             |
     INLINEASM &"; def $0, $1, $2", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %1, 1835018 /* regdef:VGPR_32 */, def %2, 1835017 /* reguse:VGPR_32 */, %3
     %4:_(s32) = COPY %1
     %5:_(s32) = COPY %2
-    G_STORE %5(s32), %6(p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    G_STORE %5(s32), %6(p1) :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     SI_RETURN
 
 ...

diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir
index b6cca696ffa87..17757b99ccab4 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir
@@ -73,7 +73,7 @@ body:             |
   bb.5:
     %31:_(s32) = G_PHI %25(s32), %bb.3, %29(s32), %bb.4
     G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %28(s64)
-    G_STORE %31(s32), %32(p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    G_STORE %31(s32), %32(p1) :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     S_ENDPGM 0
 
 ...

diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll
index ea7c3e7175f12..15355ea139205 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/atomics.ll
@@ -18,7 +18,7 @@ define amdgpu_kernel void @test2(ptr %ptr, i32 %cmp, i32 %new) {
 ; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
 define amdgpu_kernel void @test_atomic_csub_i32(ptr addrspace(1) %ptr, i32 %val) #0 {
   %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
-  store i32 %ret, i32 addrspace(1)* %ptr, align 4
+  store i32 %ret, ptr addrspace(1) %ptr, align 4
   ret void
 }
 


        


More information about the llvm-commits mailing list