[llvm] 54c1aa2 - [GVN] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 11 05:19:42 PST 2023


Author: Nikita Popov
Date: 2023-01-11T14:19:34+01:00
New Revision: 54c1aa29666f4b99f915ca9f23ac8e127e2d0ee2

URL: https://github.com/llvm/llvm-project/commit/54c1aa29666f4b99f915ca9f23ac8e127e2d0ee2
DIFF: https://github.com/llvm/llvm-project/commit/54c1aa29666f4b99f915ca9f23ac8e127e2d0ee2.diff

LOG: [GVN] Convert tests to opaque pointers (NFC)

Added: 
    

Modified: 
    llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
    llvm/test/Transforms/GVN/PRE/2018-06-08-pre-load-dbgloc-no-null-opt.ll
    llvm/test/Transforms/GVN/PRE/atomic.ll
    llvm/test/Transforms/GVN/bitcast-of-call.ll
    llvm/test/Transforms/GVN/no_speculative_loads_with_asan.ll
    llvm/test/Transforms/GVN/non-local-offset.ll
    llvm/test/Transforms/GVN/pr42605.ll
    llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll b/llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
index e6ab00cb7521e..e8fb8f1087cce 100644
--- a/llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
+++ b/llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
@@ -1,14 +1,14 @@
 ; RUN: opt < %s -passes=gvn -S | FileCheck %s
 
- at last = external global [65 x i32*]
+ at last = external global [65 x ptr]
 
 define i32 @NextRootMove(i32 %wtm, i32 %x, i32 %y, i32 %z) {
 entry:
-        %A = alloca i32*
-	%tmp17618 = load i32*, i32** getelementptr ([65 x i32*], [65 x i32*]* @last, i32 0, i32 1), align 4
-        store i32* %tmp17618, i32** %A
+        %A = alloca ptr
+	%tmp17618 = load ptr, ptr getelementptr ([65 x ptr], ptr @last, i32 0, i32 1), align 4
+        store ptr %tmp17618, ptr %A
 ; CHECK: entry:
-; CHECK-NEXT: alloca i32
+; CHECK-NEXT: alloca ptr
 ; CHECK-NEXT: %tmp17618 = load
 ; CHECK-NOT: load
 ; CHECK-NOT: phi
@@ -19,8 +19,8 @@ cond_true116:
 	br i1 %cmp, label %cond_true128, label %cond_true145
 
 cond_true128:
-	%tmp17625 = load i32*, i32** getelementptr ([65 x i32*], [65 x i32*]* @last, i32 0, i32 1), align 4
-        store i32* %tmp17625, i32** %A
+	%tmp17625 = load ptr, ptr getelementptr ([65 x ptr], ptr @last, i32 0, i32 1), align 4
+        store ptr %tmp17625, ptr %A
    %cmp1 = icmp eq i32 %x, %z
 	br i1 %cmp1 , label %bb98.backedge, label %return.loopexit
 
@@ -28,8 +28,8 @@ bb98.backedge:
 	br label %cond_true116
 
 cond_true145:
-	%tmp17631 = load i32*, i32** getelementptr ([65 x i32*], [65 x i32*]* @last, i32 0, i32 1), align 4
-        store i32* %tmp17631, i32** %A
+	%tmp17631 = load ptr, ptr getelementptr ([65 x ptr], ptr @last, i32 0, i32 1), align 4
+        store ptr %tmp17631, ptr %A
 	br i1 false, label %bb98.backedge, label %return.loopexit
 
 return.loopexit:

diff  --git a/llvm/test/Transforms/GVN/PRE/2018-06-08-pre-load-dbgloc-no-null-opt.ll b/llvm/test/Transforms/GVN/PRE/2018-06-08-pre-load-dbgloc-no-null-opt.ll
index d822083ed006f..fe6099ebf38d6 100644
--- a/llvm/test/Transforms/GVN/PRE/2018-06-08-pre-load-dbgloc-no-null-opt.ll
+++ b/llvm/test/Transforms/GVN/PRE/2018-06-08-pre-load-dbgloc-no-null-opt.ll
@@ -28,39 +28,36 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-%struct.desc = type { %struct.node* }
-%struct.node = type { i32*, %struct.desc* }
+%struct.desc = type { ptr }
+%struct.node = type { ptr, ptr }
 
-define i32 @test_no_null_opt(%struct.desc* readonly %desc) local_unnamed_addr #0 !dbg !4 {
+define i32 @test_no_null_opt(ptr readonly %desc) local_unnamed_addr #0 !dbg !4 {
 entry:
-  %tobool = icmp eq %struct.desc* %desc, null
+  %tobool = icmp eq ptr %desc, null
   br i1 %tobool, label %cond.end, label %cond.false, !dbg !9
 ; ALL: br i1 %tobool, label %entry.cond.end_crit_edge, label %cond.false, !dbg [[LOC_15_6:![0-9]+]]
 ; ALL: entry.cond.end_crit_edge:
-; ALL: load %struct.node*, %struct.node** null, align {{[0-9]+}}, !dbg [[LOC_16_13:![0-9]+]]
+; ALL: load ptr, ptr null, align {{[0-9]+}}, !dbg [[LOC_16_13:![0-9]+]]
 
 cond.false:
-  %0 = bitcast %struct.desc* %desc to i8***, !dbg !11
-  %1 = load i8**, i8*** %0, align 8, !dbg !11
-  %2 = load i8*, i8** %1, align 8
+  %0 = load ptr, ptr %desc, align 8, !dbg !11
+  %1 = load ptr, ptr %0, align 8
   br label %cond.end, !dbg !9
 
 cond.end:
-; ALL: phi %struct.node* [ %3, %cond.false ], [ %.pre, %entry.cond.end_crit_edge ]
-; ALL: phi i8* [ %2, %cond.false ], [ null, %entry.cond.end_crit_edge ]
+; ALL: phi ptr [ %0, %cond.false ], [ %.pre, %entry.cond.end_crit_edge ]
+; ALL: phi ptr [ %1, %cond.false ], [ null, %entry.cond.end_crit_edge ]
 
-  %3 = phi i8* [ %2, %cond.false ], [ null, %entry ], !dbg !9
-  %node2 = getelementptr inbounds %struct.desc, %struct.desc* %desc, i64 0, i32 0
-  %4 = load %struct.node*, %struct.node** %node2, align 8, !dbg !10
-  %descs = getelementptr inbounds %struct.node, %struct.node* %4, i64 0, i32 1
-  %5 = bitcast %struct.desc** %descs to i8**
-  %6 = load i8*, i8** %5, align 8
-  %call = tail call i32 @bar(i8* %3, i8* %6)
+  %2 = phi ptr [ %1, %cond.false ], [ null, %entry ], !dbg !9
+  %3 = load ptr, ptr %desc, align 8, !dbg !10
+  %descs = getelementptr inbounds %struct.node, ptr %3, i64 0, i32 1
+  %4 = load ptr, ptr %descs, align 8
+  %call = tail call i32 @bar(ptr %2, ptr %4)
   ret i32 %call
 }
 attributes #0 = { null_pointer_is_valid }
 
-declare i32 @bar(i8*, i8*) local_unnamed_addr #1
+declare i32 @bar(ptr, ptr) local_unnamed_addr #1
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!2, !3}
 

diff  --git a/llvm/test/Transforms/GVN/PRE/atomic.ll b/llvm/test/Transforms/GVN/PRE/atomic.ll
index 8cb49cb8652a5..185e96747a09f 100644
--- a/llvm/test/Transforms/GVN/PRE/atomic.ll
+++ b/llvm/test/Transforms/GVN/PRE/atomic.ll
@@ -11,9 +11,9 @@ define i32 @test1() nounwind uwtable ssp {
 ; CHECK-LABEL: test1
 ; CHECK: add i32 %x, %x
 entry:
-  %x = load i32, i32* @y
-  store atomic i32 %x, i32* @x unordered, align 4
-  %y = load i32, i32* @y
+  %x = load i32, ptr @y
+  store atomic i32 %x, ptr @x unordered, align 4
+  %y = load i32, ptr @y
   %z = add i32 %x, %y
   ret i32 %z
 }
@@ -23,9 +23,9 @@ define i32 @test3() nounwind uwtable ssp {
 ; CHECK-LABEL: test3
 ; CHECK: add i32 %x, %x
 entry:
-  %x = load i32, i32* @y
-  %y = load atomic i32, i32* @x unordered, align 4
-  %z = load i32, i32* @y
+  %x = load i32, ptr @y
+  %y = load atomic i32, ptr @x unordered, align 4
+  %z = load i32, ptr @y
   %a = add i32 %x, %z
   %b = add i32 %y, %a
   ret i32 %b
@@ -36,8 +36,8 @@ define i32 @test5() nounwind uwtable ssp {
 ; CHECK-LABEL: test5
 ; CHECK: add i32 %x, %x
 entry:
-  %x = load atomic i32, i32* @x unordered, align 4
-  %y = load i32, i32* @x
+  %x = load atomic i32, ptr @x unordered, align 4
+  %y = load i32, ptr @x
   %z = add i32 %x, %y
   ret i32 %z
 }
@@ -45,10 +45,10 @@ entry:
 ; GVN unordered load to load (unordered load must not be removed)
 define i32 @test6() nounwind uwtable ssp {
 ; CHECK-LABEL: test6
-; CHECK: load atomic i32, i32* @x unordered
+; CHECK: load atomic i32, ptr @x unordered
 entry:
-  %x = load i32, i32* @x
-  %x2 = load atomic i32, i32* @x unordered, align 4
+  %x = load i32, ptr @x
+  %x2 = load atomic i32, ptr @x unordered, align 4
   %x3 = add i32 %x, %x2
   ret i32 %x3
 }
@@ -58,10 +58,10 @@ define i32 @test7() nounwind uwtable ssp {
 ; CHECK-LABEL: test7
 ; CHECK: add i32 %x, %y
 entry:
-  %x = load i32, i32* @y
-  store atomic i32 %x, i32* @x release, align 4
-  %w = load atomic i32, i32* @x acquire, align 4
-  %y = load i32, i32* @y
+  %x = load i32, ptr @y
+  store atomic i32 %x, ptr @x release, align 4
+  %w = load atomic i32, ptr @x acquire, align 4
+  %y = load i32, ptr @y
   %z = add i32 %x, %y
   ret i32 %z
 }
@@ -71,9 +71,9 @@ define i32 @test9() nounwind uwtable ssp {
 ; CHECK-LABEL: test9
 ; CHECK: add i32 %x, %x
 entry:
-  %x = load i32, i32* @y
-  store atomic i32 %x, i32* @x monotonic, align 4
-  %y = load i32, i32* @y
+  %x = load i32, ptr @y
+  store atomic i32 %x, ptr @x monotonic, align 4
+  %y = load i32, ptr @y
   %z = add i32 %x, %y
   ret i32 %z
 }
@@ -83,9 +83,9 @@ define i32 @test10() nounwind uwtable ssp {
 ; CHECK-LABEL: test10
 ; CHECK: add i32 %x, %y
 entry:
-  %x = load atomic i32, i32* @y unordered, align 4
-  %clobber = load atomic i32, i32* @x monotonic, align 4
-  %y = load atomic i32, i32* @y monotonic, align 4
+  %x = load atomic i32, ptr @y unordered, align 4
+  %clobber = load atomic i32, ptr @x monotonic, align 4
+  %y = load atomic i32, ptr @y monotonic, align 4
   %z = add i32 %x, %y
   ret i32 %z
 }
@@ -96,70 +96,70 @@ entry:
   br i1 %flag, label %if.then, label %if.end
 
 if.then:
-  store i32 43, i32* @y, align 4
-; CHECK: store i32 43, i32* @y, align 4
+  store i32 43, ptr @y, align 4
+; CHECK: store i32 43, ptr @y, align 4
   br label %if.end
 
 if.end:
-  load atomic i32, i32* @x acquire, align 4
-  %load = load i32, i32* @y, align 4
-; CHECK: load atomic i32, i32* @x acquire, align 4
-; CHECK: load i32, i32* @y, align 4
+  load atomic i32, ptr @x acquire, align 4
+  %load = load i32, ptr @y, align 4
+; CHECK: load atomic i32, ptr @x acquire, align 4
+; CHECK: load i32, ptr @y, align 4
   ret i32 %load
 }
 
 ; CHECK-LABEL: @test12(
 ; Can't remove a load over a ordering barrier
-define i32 @test12(i1 %B, i32* %P1, i32* %P2) {
-  %load0 = load i32, i32* %P1
-  %1 = load atomic i32, i32* %P2 seq_cst, align 4
-  %load1 = load i32, i32* %P1
+define i32 @test12(i1 %B, ptr %P1, ptr %P2) {
+  %load0 = load i32, ptr %P1
+  %1 = load atomic i32, ptr %P2 seq_cst, align 4
+  %load1 = load i32, ptr %P1
   %sel = select i1 %B, i32 %load0, i32 %load1
   ret i32 %sel
-  ; CHECK: load i32, i32* %P1
-  ; CHECK: load i32, i32* %P1
+  ; CHECK: load i32, ptr %P1
+  ; CHECK: load i32, ptr %P1
 }
 
 ; CHECK-LABEL: @test13(
 ; atomic to non-atomic forwarding is legal
-define i32 @test13(i32* %P1) {
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
-  %b = load i32, i32* %P1
+define i32 @test13(ptr %P1) {
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
+  %b = load i32, ptr %P1
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, i32* %P1
+  ; CHECK: load atomic i32, ptr %P1
   ; CHECK: ret i32 0
 }
 
 ; CHECK-LABEL: @test13b(
-define i32 @test13b(i32* %P1) {
-  store  atomic i32 0, i32* %P1 unordered, align 4
-  %b = load i32, i32* %P1
+define i32 @test13b(ptr %P1) {
+  store  atomic i32 0, ptr %P1 unordered, align 4
+  %b = load i32, ptr %P1
   ret i32 %b
   ; CHECK: ret i32 0
 }
 
 ; CHECK-LABEL: @test14(
 ; atomic to unordered atomic forwarding is legal
-define i32 @test14(i32* %P1) {
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
-  %b = load atomic i32, i32* %P1 unordered, align 4
+define i32 @test14(ptr %P1) {
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, i32* %P1 seq_cst
+  ; CHECK: load atomic i32, ptr %P1 seq_cst
   ; CHECK-NEXT: ret i32 0
 }
 
 ; CHECK-LABEL: @test15(
 ; implementation restriction: can't forward to stonger
 ; than unordered
-define i32 @test15(i32* %P1, i32* %P2) {
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
-  %b = load atomic i32, i32* %P1 seq_cst, align 4
+define i32 @test15(ptr %P1, ptr %P2) {
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
+  %b = load atomic i32, ptr %P1 seq_cst, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load atomic i32, i32* %P1
-  ; CHECK: load atomic i32, i32* %P1
+  ; CHECK: load atomic i32, ptr %P1
+  ; CHECK: load atomic i32, ptr %P1
 }
 
 ; CHECK-LABEL: @test16(
@@ -167,171 +167,168 @@ define i32 @test15(i32* %P1, i32* %P2) {
 ; it would be legal to use the later value in place of the
 ; former in this particular example.  We just don't
 ; do that right now.)
-define i32 @test16(i32* %P1, i32* %P2) {
-  %a = load i32, i32* %P1, align 4
-  %b = load atomic i32, i32* %P1 unordered, align 4
+define i32 @test16(ptr %P1, ptr %P2) {
+  %a = load i32, ptr %P1, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
-  ; CHECK: load i32, i32* %P1
-  ; CHECK: load atomic i32, i32* %P1
+  ; CHECK: load i32, ptr %P1
+  ; CHECK: load atomic i32, ptr %P1
 }
 
 ; CHECK-LABEL: @test16b(
-define i32 @test16b(i32* %P1) {
-  store i32 0, i32* %P1
-  %b = load atomic i32, i32* %P1 unordered, align 4
+define i32 @test16b(ptr %P1) {
+  store i32 0, ptr %P1
+  %b = load atomic i32, ptr %P1 unordered, align 4
   ret i32 %b
-  ; CHECK: load atomic i32, i32* %P1
+  ; CHECK: load atomic i32, ptr %P1
 }
 
 ; Can't DSE across a full fence
-define void @fence_seq_cst_store(i32* %P1, i32* %P2) {
+define void @fence_seq_cst_store(ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_seq_cst_store(
 ; CHECK: store
 ; CHECK: store atomic
 ; CHECK: store
-  store i32 0, i32* %P1, align 4
-  store atomic i32 0, i32* %P2 seq_cst, align 4
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
+  store atomic i32 0, ptr %P2 seq_cst, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE across a full fence
-define void @fence_seq_cst(i32* %P1, i32* %P2) {
+define void @fence_seq_cst(ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_seq_cst(
 ; CHECK: store
 ; CHECK: fence seq_cst
 ; CHECK: store
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   fence seq_cst
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE across a full syncscope("singlethread") fence
-define void @fence_seq_cst_st(i32* %P1, i32* %P2) {
+define void @fence_seq_cst_st(ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_seq_cst_st(
 ; CHECK: store
 ; CHECK: fence syncscope("singlethread") seq_cst
 ; CHECK: store
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   fence syncscope("singlethread") seq_cst
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE across a full fence
-define void @fence_asm_sideeffect(i32* %P1, i32* %P2) {
+define void @fence_asm_sideeffect(ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_asm_sideeffect(
 ; CHECK: store
 ; CHECK: call void asm sideeffect
 ; CHECK: store
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   call void asm sideeffect "", ""()
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE across a full fence
-define void @fence_asm_memory(i32* %P1, i32* %P2) {
+define void @fence_asm_memory(ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @fence_asm_memory(
 ; CHECK: store
 ; CHECK: call void asm
 ; CHECK: store
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   call void asm "", "~{memory}"()
-  store i32 0, i32* %P1, align 4
+  store i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can't remove a volatile load
-define i32 @volatile_load(i32* %P1, i32* %P2) {
-  %a = load i32, i32* %P1, align 4
-  %b = load volatile i32, i32* %P1, align 4
+define i32 @volatile_load(ptr %P1, ptr %P2) {
+  %a = load i32, ptr %P1, align 4
+  %b = load volatile i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
   ; CHECK-LABEL: @volatile_load(
-  ; CHECK: load i32, i32* %P1
-  ; CHECK: load volatile i32, i32* %P1
+  ; CHECK: load i32, ptr %P1
+  ; CHECK: load volatile i32, ptr %P1
 }
 
 ; Can't remove redundant volatile loads
-define i32 @redundant_volatile_load(i32* %P1, i32* %P2) {
-  %a = load volatile i32, i32* %P1, align 4
-  %b = load volatile i32, i32* %P1, align 4
+define i32 @redundant_volatile_load(ptr %P1, ptr %P2) {
+  %a = load volatile i32, ptr %P1, align 4
+  %b = load volatile i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
   ; CHECK-LABEL: @redundant_volatile_load(
-  ; CHECK: load volatile i32, i32* %P1
-  ; CHECK: load volatile i32, i32* %P1
+  ; CHECK: load volatile i32, ptr %P1
+  ; CHECK: load volatile i32, ptr %P1
   ; CHECK: sub
 }
 
 ; Can't DSE a volatile store
-define void @volatile_store(i32* %P1, i32* %P2) {
+define void @volatile_store(ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @volatile_store(
 ; CHECK: store volatile
 ; CHECK: store
-  store volatile i32 0, i32* %P1, align 4
-  store i32 3, i32* %P1, align 4
+  store volatile i32 0, ptr %P1, align 4
+  store i32 3, ptr %P1, align 4
   ret void
 }
 
 ; Can't DSE a redundant volatile store
-define void @redundant_volatile_store(i32* %P1, i32* %P2) {
+define void @redundant_volatile_store(ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @redundant_volatile_store(
 ; CHECK: store volatile
 ; CHECK: store volatile
-  store volatile i32 0, i32* %P1, align 4
-  store volatile i32 0, i32* %P1, align 4
+  store volatile i32 0, ptr %P1, align 4
+  store volatile i32 0, ptr %P1, align 4
   ret void
 }
 
 ; Can value forward from volatiles
-define i32 @test20(i32* %P1, i32* %P2) {
-  %a = load volatile i32, i32* %P1, align 4
-  %b = load i32, i32* %P1, align 4
+define i32 @test20(ptr %P1, ptr %P2) {
+  %a = load volatile i32, ptr %P1, align 4
+  %b = load i32, ptr %P1, align 4
   %res = sub i32 %a, %b
   ret i32 %res
   ; CHECK-LABEL: @test20(
-  ; CHECK: load volatile i32, i32* %P1
+  ; CHECK: load volatile i32, ptr %P1
   ; CHECK: ret i32 0
 }
 
 ; We're currently conservative about widening
-define i64 @widen1(i32* %P1) {
+define i64 @widen1(ptr %P1) {
   ; CHECK-LABEL: @widen1(
-  ; CHECK: load atomic i32, i32* %P1
-  ; CHECK: load atomic i64, i64* %p2
-  %p2 = bitcast i32* %P1 to i64*
-  %a = load atomic i32, i32* %P1 unordered, align 4
-  %b = load atomic i64, i64* %p2 unordered, align 4
+  ; CHECK: load atomic i32, ptr %P1
+  ; CHECK: load atomic i64, ptr %P1
+  %a = load atomic i32, ptr %P1 unordered, align 4
+  %b = load atomic i64, ptr %P1 unordered, align 4
   %a64 = sext i32 %a to i64
   %res = sub i64 %a64, %b
   ret i64 %res
 }
 
 ; narrowing does work
-define i64 @narrow(i32* %P1) {
+define i64 @narrow(ptr %P1) {
   ; CHECK-LABEL: @narrow(
-  ; CHECK: load atomic i64, i64* %p2
-  ; CHECK-NOT: load atomic i32, i32* %P1
-  %p2 = bitcast i32* %P1 to i64*
-  %a64 = load atomic i64, i64* %p2 unordered, align 4
-  %b = load atomic i32, i32* %P1 unordered, align 4
+  ; CHECK: load atomic i64, ptr %P1
+  ; CHECK-NOT: load atomic i32, ptr %P1
+  %a64 = load atomic i64, ptr %P1 unordered, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   %b64 = sext i32 %b to i64
   %res = sub i64 %a64, %b64
   ret i64 %res
 }
 
 ; Missed optimization, we don't yet optimize ordered loads
-define i64 @narrow2(i32* %P1) {
+define i64 @narrow2(ptr %P1) {
   ; CHECK-LABEL: @narrow2(
-  ; CHECK: load atomic i64, i64* %p2
-  ; CHECK: load atomic i32, i32* %P1
-  %p2 = bitcast i32* %P1 to i64*
-  %a64 = load atomic i64, i64* %p2 acquire, align 4
-  %b = load atomic i32, i32* %P1 acquire, align 4
+  ; CHECK: load atomic i64, ptr %P1
+  ; CHECK: load atomic i32, ptr %P1
+  %a64 = load atomic i64, ptr %P1 acquire, align 4
+  %b = load atomic i32, ptr %P1 acquire, align 4
   %b64 = sext i32 %b to i64
   %res = sub i64 %a64, %b64
   ret i64 %res
@@ -342,53 +339,53 @@ define i64 @narrow2(i32* %P1) {
 ; are here only to show that we haven't obviously broken anything.
 
 ; unordered atomic to unordered atomic
-define i32 @non_local_fre(i32* %P1) {
+define i32 @non_local_fre(ptr %P1) {
 ; CHECK-LABEL: @non_local_fre(
-; CHECK: load atomic i32, i32* %P1
+; CHECK: load atomic i32, ptr %P1
 ; CHECK: ret i32 0
 ; CHECK: ret i32 0
-  %a = load atomic i32, i32* %P1 unordered, align 4
+  %a = load atomic i32, ptr %P1 unordered, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   ret i32 %a
 next:
-  %b = load atomic i32, i32* %P1 unordered, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 ; unordered atomic to non-atomic
-define i32 @non_local_fre2(i32* %P1) {
+define i32 @non_local_fre2(ptr %P1) {
 ; CHECK-LABEL: @non_local_fre2(
-; CHECK: load atomic i32, i32* %P1
+; CHECK: load atomic i32, ptr %P1
 ; CHECK: ret i32 0
 ; CHECK: ret i32 0
-  %a = load atomic i32, i32* %P1 unordered, align 4
+  %a = load atomic i32, ptr %P1 unordered, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   ret i32 %a
 next:
-  %b = load i32, i32* %P1
+  %b = load i32, ptr %P1
   %res = sub i32 %a, %b
   ret i32 %res
 }
 
 ; Can't forward ordered atomics.
-define i32 @non_local_fre3(i32* %P1) {
+define i32 @non_local_fre3(ptr %P1) {
 ; CHECK-LABEL: @non_local_fre3(
-; CHECK: load atomic i32, i32* %P1 acquire
+; CHECK: load atomic i32, ptr %P1 acquire
 ; CHECK: ret i32 0
-; CHECK: load atomic i32, i32* %P1 acquire
+; CHECK: load atomic i32, ptr %P1 acquire
 ; CHECK: ret i32 %res
-  %a = load atomic i32, i32* %P1 acquire, align 4
+  %a = load atomic i32, ptr %P1 acquire, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   ret i32 %a
 next:
-  %b = load atomic i32, i32* %P1 acquire, align 4
+  %b = load atomic i32, ptr %P1 acquire, align 4
   %res = sub i32 %a, %b
   ret i32 %res
 }
@@ -396,108 +393,108 @@ next:
 declare void @clobber()
 
 ; unordered atomic to unordered atomic
-define i32 @non_local_pre(i32* %P1) {
+define i32 @non_local_pre(ptr %P1) {
 ; CHECK-LABEL: @non_local_pre(
-; CHECK: load atomic i32, i32* %P1 unordered
-; CHECK: load atomic i32, i32* %P1 unordered
+; CHECK: load atomic i32, ptr %P1 unordered
+; CHECK: load atomic i32, ptr %P1 unordered
 ; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
 ; CHECK: ret i32 %b
-  %a = load atomic i32, i32* %P1 unordered, align 4
+  %a = load atomic i32, ptr %P1 unordered, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   call void @clobber()
   br label %next
 next:
-  %b = load atomic i32, i32* %P1 unordered, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   ret i32 %b
 }
 
 ; unordered atomic to non-atomic
-define i32 @non_local_pre2(i32* %P1) {
+define i32 @non_local_pre2(ptr %P1) {
 ; CHECK-LABEL: @non_local_pre2(
-; CHECK: load atomic i32, i32* %P1 unordered
-; CHECK: load i32, i32* %P1
+; CHECK: load atomic i32, ptr %P1 unordered
+; CHECK: load i32, ptr %P1
 ; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
 ; CHECK: ret i32 %b
-  %a = load atomic i32, i32* %P1 unordered, align 4
+  %a = load atomic i32, ptr %P1 unordered, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   call void @clobber()
   br label %next
 next:
-  %b = load i32, i32* %P1
+  %b = load i32, ptr %P1
   ret i32 %b
 }
 
 ; non-atomic to unordered atomic - can't forward!
-define i32 @non_local_pre3(i32* %P1) {
+define i32 @non_local_pre3(ptr %P1) {
 ; CHECK-LABEL: @non_local_pre3(
-; CHECK: %a = load i32, i32* %P1
-; CHECK: %b = load atomic i32, i32* %P1 unordered
+; CHECK: %a = load i32, ptr %P1
+; CHECK: %b = load atomic i32, ptr %P1 unordered
 ; CHECK: ret i32 %b
-  %a = load i32, i32* %P1
+  %a = load i32, ptr %P1
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   call void @clobber()
   br label %next
 next:
-  %b = load atomic i32, i32* %P1 unordered, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   ret i32 %b
 }
 
 ; ordered atomic to ordered atomic - can't forward
-define i32 @non_local_pre4(i32* %P1) {
+define i32 @non_local_pre4(ptr %P1) {
 ; CHECK-LABEL: @non_local_pre4(
-; CHECK: %a = load atomic i32, i32* %P1 seq_cst
-; CHECK: %b = load atomic i32, i32* %P1 seq_cst
+; CHECK: %a = load atomic i32, ptr %P1 seq_cst
+; CHECK: %b = load atomic i32, ptr %P1 seq_cst
 ; CHECK: ret i32 %b
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   call void @clobber()
   br label %next
 next:
-  %b = load atomic i32, i32* %P1 seq_cst, align 4
+  %b = load atomic i32, ptr %P1 seq_cst, align 4
   ret i32 %b
 }
 
 ; can't remove volatile on any path
-define i32 @non_local_pre5(i32* %P1) {
+define i32 @non_local_pre5(ptr %P1) {
 ; CHECK-LABEL: @non_local_pre5(
-; CHECK: %a = load atomic i32, i32* %P1 seq_cst
-; CHECK: %b = load volatile i32, i32* %P1
+; CHECK: %a = load atomic i32, ptr %P1 seq_cst
+; CHECK: %b = load volatile i32, ptr %P1
 ; CHECK: ret i32 %b
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   call void @clobber()
   br label %next
 next:
-  %b = load volatile i32, i32* %P1
+  %b = load volatile i32, ptr %P1
   ret i32 %b
 }
 
 
 ; ordered atomic to unordered atomic
-define i32 @non_local_pre6(i32* %P1) {
+define i32 @non_local_pre6(ptr %P1) {
 ; CHECK-LABEL: @non_local_pre6(
-; CHECK: load atomic i32, i32* %P1 seq_cst
-; CHECK: load atomic i32, i32* %P1 unordered
+; CHECK: load atomic i32, ptr %P1 seq_cst
+; CHECK: load atomic i32, ptr %P1 unordered
 ; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
 ; CHECK: ret i32 %b
-  %a = load atomic i32, i32* %P1 seq_cst, align 4
+  %a = load atomic i32, ptr %P1 seq_cst, align 4
   %cmp = icmp eq i32 %a, 0
   br i1 %cmp, label %early, label %next
 early:
   call void @clobber()
   br label %next
 next:
-  %b = load atomic i32, i32* %P1 unordered, align 4
+  %b = load atomic i32, ptr %P1 unordered, align 4
   ret i32 %b
 }
 

diff  --git a/llvm/test/Transforms/GVN/bitcast-of-call.ll b/llvm/test/Transforms/GVN/bitcast-of-call.ll
index 9107e63720ff6..6c4e8d2989977 100644
--- a/llvm/test/Transforms/GVN/bitcast-of-call.ll
+++ b/llvm/test/Transforms/GVN/bitcast-of-call.ll
@@ -1,13 +1,13 @@
 ; RUN: opt < %s -passes=gvn -S | FileCheck %s
 ; PR2213
 
-define i32* @f(i8* %x) {
+define ptr @f(ptr %x) {
 entry:
-        %tmp = call i8* @m( i32 12 )            ; <i8*> [#uses=2]
-        %tmp1 = bitcast i8* %tmp to i32*                ; <i32*> [#uses=0]
-        %tmp2 = bitcast i8* %tmp to i32*                ; <i32*> [#uses=0]
+        %tmp = call ptr @m( i32 12 )            ; <ptr> [#uses=2]
+        %tmp1 = bitcast ptr %tmp to ptr                ; <ptr> [#uses=0]
+        %tmp2 = bitcast ptr %tmp to ptr                ; <ptr> [#uses=0]
 ; CHECK-NOT: %tmp2
-        ret i32* %tmp2
+        ret ptr %tmp2
 }
 
-declare i8* @m(i32)
+declare ptr @m(i32)

diff  --git a/llvm/test/Transforms/GVN/no_speculative_loads_with_asan.ll b/llvm/test/Transforms/GVN/no_speculative_loads_with_asan.ll
index dffc56dba577c..80a9c2dffecd5 100644
--- a/llvm/test/Transforms/GVN/no_speculative_loads_with_asan.ll
+++ b/llvm/test/Transforms/GVN/no_speculative_loads_with_asan.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -O3 -S %s | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-declare noalias i8* @_Znam(i64) #1
+declare noalias ptr @_Znam(i64) #1
 
 define i32 @TestNoAsan() {
 ; CHECK-LABEL: @TestNoAsan(
@@ -9,19 +9,17 @@ define i32 @TestNoAsan() {
 ; CHECK-NEXT:    ret i32 0
 ;
 bb:
-  %i = tail call noalias i8* @_Znam(i64 2)
-  %i1 = getelementptr inbounds i8, i8* %i, i64 1
-  store i8 0, i8* %i1, align 1
-  store i8 0, i8* %i, align 1
-  %i2 = bitcast i8* %i to i16*
-  %i3 = load i16, i16* %i2, align 4
+  %i = tail call noalias ptr @_Znam(i64 2)
+  %i1 = getelementptr inbounds i8, ptr %i, i64 1
+  store i8 0, ptr %i1, align 1
+  store i8 0, ptr %i, align 1
+  %i3 = load i16, ptr %i, align 4
   %i4 = icmp eq i16 %i3, 0
   br i1 %i4, label %bb10, label %bb5
 
 bb5:                                              ; preds = %bb
-  %i6 = getelementptr inbounds i8, i8* %i, i64 2
-  %i7 = bitcast i8* %i6 to i16*
-  %i8 = load i16, i16* %i7, align 2
+  %i6 = getelementptr inbounds i8, ptr %i, i64 2
+  %i8 = load i16, ptr %i6, align 2
   %i9 = sext i16 %i8 to i32
   br label %bb10
 
@@ -33,18 +31,16 @@ bb10:                                             ; preds = %bb5, %bb
 define i32 @TestAsan() sanitize_address {
 ; CHECK-LABEL: @TestAsan(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[I:%.*]] = tail call noalias dereferenceable_or_null(2) i8* @_Znam(i64 2)
-; CHECK-NEXT:    [[I1:%.*]] = getelementptr inbounds i8, i8* [[I]], i64 1
-; CHECK-NEXT:    store i8 0, i8* [[I1]], align 1
-; CHECK-NEXT:    store i8 0, i8* [[I]], align 1
-; CHECK-NEXT:    [[I2:%.*]] = bitcast i8* [[I]] to i16*
-; CHECK-NEXT:    [[I3:%.*]] = load i16, i16* [[I2]], align 4
+; CHECK-NEXT:    [[I:%.*]] = tail call noalias dereferenceable_or_null(2) ptr @_Znam(i64 2)
+; CHECK-NEXT:    [[I1:%.*]] = getelementptr inbounds i8, ptr [[I]], i64 1
+; CHECK-NEXT:    store i8 0, ptr [[I1]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[I]], align 1
+; CHECK-NEXT:    [[I3:%.*]] = load i16, ptr [[I]], align 4
 ; CHECK-NEXT:    [[I4:%.*]] = icmp eq i16 [[I3]], 0
 ; CHECK-NEXT:    br i1 [[I4]], label [[BB10:%.*]], label [[BB5:%.*]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    [[I6:%.*]] = getelementptr inbounds i8, i8* [[I]], i64 2
-; CHECK-NEXT:    [[I7:%.*]] = bitcast i8* [[I6]] to i16*
-; CHECK-NEXT:    [[I8:%.*]] = load i16, i16* [[I7]], align 2
+; CHECK-NEXT:    [[I6:%.*]] = getelementptr inbounds i8, ptr [[I]], i64 2
+; CHECK-NEXT:    [[I8:%.*]] = load i16, ptr [[I6]], align 2
 ; CHECK-NEXT:    [[I9:%.*]] = sext i16 [[I8]] to i32
 ; CHECK-NEXT:    br label [[BB10]]
 ; CHECK:       bb10:
@@ -52,19 +48,17 @@ define i32 @TestAsan() sanitize_address {
 ; CHECK-NEXT:    ret i32 [[I11]]
 ;
 bb:
-  %i = tail call noalias i8* @_Znam(i64 2)
-  %i1 = getelementptr inbounds i8, i8* %i, i64 1
-  store i8 0, i8* %i1, align 1
-  store i8 0, i8* %i, align 1
-  %i2 = bitcast i8* %i to i16*
-  %i3 = load i16, i16* %i2, align 4
+  %i = tail call noalias ptr @_Znam(i64 2)
+  %i1 = getelementptr inbounds i8, ptr %i, i64 1
+  store i8 0, ptr %i1, align 1
+  store i8 0, ptr %i, align 1
+  %i3 = load i16, ptr %i, align 4
   %i4 = icmp eq i16 %i3, 0
   br i1 %i4, label %bb10, label %bb5
 
 bb5:                                              ; preds = %bb
-  %i6 = getelementptr inbounds i8, i8* %i, i64 2
-  %i7 = bitcast i8* %i6 to i16*
-  %i8 = load i16, i16* %i7, align 2
+  %i6 = getelementptr inbounds i8, ptr %i, i64 2
+  %i8 = load i16, ptr %i6, align 2
   %i9 = sext i16 %i8 to i32
   br label %bb10
 
@@ -76,18 +70,16 @@ bb10:                                             ; preds = %bb5, %bb
 define i32 @TestHWAsan() sanitize_hwaddress {
 ; CHECK-LABEL: @TestHWAsan(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[I:%.*]] = tail call noalias dereferenceable_or_null(2) i8* @_Znam(i64 2)
-; CHECK-NEXT:    [[I1:%.*]] = getelementptr inbounds i8, i8* [[I]], i64 1
-; CHECK-NEXT:    store i8 0, i8* [[I1]], align 1
-; CHECK-NEXT:    store i8 0, i8* [[I]], align 1
-; CHECK-NEXT:    [[I2:%.*]] = bitcast i8* [[I]] to i16*
-; CHECK-NEXT:    [[I3:%.*]] = load i16, i16* [[I2]], align 4
+; CHECK-NEXT:    [[I:%.*]] = tail call noalias dereferenceable_or_null(2) ptr @_Znam(i64 2)
+; CHECK-NEXT:    [[I1:%.*]] = getelementptr inbounds i8, ptr [[I]], i64 1
+; CHECK-NEXT:    store i8 0, ptr [[I1]], align 1
+; CHECK-NEXT:    store i8 0, ptr [[I]], align 1
+; CHECK-NEXT:    [[I3:%.*]] = load i16, ptr [[I]], align 4
 ; CHECK-NEXT:    [[I4:%.*]] = icmp eq i16 [[I3]], 0
 ; CHECK-NEXT:    br i1 [[I4]], label [[BB10:%.*]], label [[BB5:%.*]]
 ; CHECK:       bb5:
-; CHECK-NEXT:    [[I6:%.*]] = getelementptr inbounds i8, i8* [[I]], i64 2
-; CHECK-NEXT:    [[I7:%.*]] = bitcast i8* [[I6]] to i16*
-; CHECK-NEXT:    [[I8:%.*]] = load i16, i16* [[I7]], align 2
+; CHECK-NEXT:    [[I6:%.*]] = getelementptr inbounds i8, ptr [[I]], i64 2
+; CHECK-NEXT:    [[I8:%.*]] = load i16, ptr [[I6]], align 2
 ; CHECK-NEXT:    [[I9:%.*]] = sext i16 [[I8]] to i32
 ; CHECK-NEXT:    br label [[BB10]]
 ; CHECK:       bb10:
@@ -95,19 +87,17 @@ define i32 @TestHWAsan() sanitize_hwaddress {
 ; CHECK-NEXT:    ret i32 [[I11]]
 ;
 bb:
-  %i = tail call noalias i8* @_Znam(i64 2)
-  %i1 = getelementptr inbounds i8, i8* %i, i64 1
-  store i8 0, i8* %i1, align 1
-  store i8 0, i8* %i, align 1
-  %i2 = bitcast i8* %i to i16*
-  %i3 = load i16, i16* %i2, align 4
+  %i = tail call noalias ptr @_Znam(i64 2)
+  %i1 = getelementptr inbounds i8, ptr %i, i64 1
+  store i8 0, ptr %i1, align 1
+  store i8 0, ptr %i, align 1
+  %i3 = load i16, ptr %i, align 4
   %i4 = icmp eq i16 %i3, 0
   br i1 %i4, label %bb10, label %bb5
 
 bb5:                                              ; preds = %bb
-  %i6 = getelementptr inbounds i8, i8* %i, i64 2
-  %i7 = bitcast i8* %i6 to i16*
-  %i8 = load i16, i16* %i7, align 2
+  %i6 = getelementptr inbounds i8, ptr %i, i64 2
+  %i8 = load i16, ptr %i6, align 2
   %i9 = sext i16 %i8 to i32
   br label %bb10
 

diff --git a/llvm/test/Transforms/GVN/non-local-offset.ll b/llvm/test/Transforms/GVN/non-local-offset.ll
index 582f3a83ac8b3..0467657f96555 100644
--- a/llvm/test/Transforms/GVN/non-local-offset.ll
+++ b/llvm/test/Transforms/GVN/non-local-offset.ll
@@ -7,19 +7,19 @@ target datalayout = "e-p:64:64:64"
 
 ; CHECK-LABEL: @yes(
 ; CHECK: if.then:
-; CHECK-NEXT: store i32 0, i32* %q
+; CHECK-NEXT: store i32 0, ptr %q
 ; CHECK-NEXT: ret void
 
-define void @yes(i1 %c, i32* %p, i32* %q) nounwind {
+define void @yes(i1 %c, ptr %p, ptr %q) nounwind {
 entry:
-  store i32 0, i32* %p
-  %p1 = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 1, i32* %p1
+  store i32 0, ptr %p
+  %p1 = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 1, ptr %p1
   br i1 %c, label %if.else, label %if.then
 
 if.then:
-  %t = load i32, i32* %p
-  store i32 %t, i32* %q
+  %t = load i32, ptr %p
+  store i32 %t, ptr %q
   ret void
 
 if.else:
@@ -32,28 +32,26 @@ if.else:
 
 ; CHECK-LABEL: @watch_out_for_size_change(
 ; CHECK: if.then:
-; CHECK-NEXT: store i32 0, i32* %q
+; CHECK-NEXT: store i32 0, ptr %q
 ; CHECK-NEXT: ret void
 ; CHECK: if.else:
-; CHECK: load i64, i64* %pc
+; CHECK: load i64, ptr %p
 ; CHECK: store i64
 
-define void @watch_out_for_size_change(i1 %c, i32* %p, i32* %q) nounwind {
+define void @watch_out_for_size_change(i1 %c, ptr %p, ptr %q) nounwind {
 entry:
-  store i32 0, i32* %p
-  %p1 = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 1, i32* %p1
+  store i32 0, ptr %p
+  %p1 = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 1, ptr %p1
   br i1 %c, label %if.else, label %if.then
 
 if.then:
-  %t = load i32, i32* %p
-  store i32 %t, i32* %q
+  %t = load i32, ptr %p
+  store i32 %t, ptr %q
   ret void
 
 if.else:
-  %pc = bitcast i32* %p to i64*
-  %qc = bitcast i32* %q to i64*
-  %t64 = load i64, i64* %pc
-  store i64 %t64, i64* %qc
+  %t64 = load i64, ptr %p
+  store i64 %t64, ptr %q
   ret void
 }

diff --git a/llvm/test/Transforms/GVN/pr42605.ll b/llvm/test/Transforms/GVN/pr42605.ll
index 3dcb47f083fa3..f0ff6d9b23e1f 100644
--- a/llvm/test/Transforms/GVN/pr42605.ll
+++ b/llvm/test/Transforms/GVN/pr42605.ll
@@ -8,12 +8,12 @@ target triple = "x86_64-unknown-linux-gnu"
 @.str = private unnamed_addr constant [8 x i8] c"%d, %d\0A\00", align 1
 
 ; Function Attrs: nofree nounwind
-declare dso_local i32 @printf(i8* nocapture readonly, ...) local_unnamed_addr
+declare dso_local i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr
 
 ; Function Attrs: noinline norecurse nounwind readonly uwtable
 define dso_local i32 @_Z3gooi(i32 %i) local_unnamed_addr #0 {
 entry:
-  %t0 = load i32, i32* @global, align 4, !tbaa !2
+  %t0 = load i32, ptr @global, align 4, !tbaa !2
   %add = add nsw i32 %t0, %i
   ret i32 %add
 }
@@ -34,14 +34,14 @@ if.then:                                          ; preds = %entry
 ; Check pre happens after phitranslate.
 ; CHECK-LABEL: @noclobber
 ; CHECK: %add4.pre-phi = phi i32 [ %add2, %if.then ], [ %add, %entry ]
-; CHECK: printf(i8* getelementptr inbounds {{.*}}, i32 %add4.pre-phi)
+; CHECK: printf(ptr @.str, i32 %global2.0, i32 %add4.pre-phi)
 
 if.end:                                           ; preds = %if.then, %entry
   %i.0 = phi i32 [ 3, %if.then ], [ 2, %entry ]
   %global2.0 = phi i32 [ %add2, %if.then ], [ %add, %entry ]
   %call3 = tail call i32 @_Z3gooi(i32 %i.0)
   %add4 = add nsw i32 %call3, 5
-  %call5 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i32 %global2.0, i32 %add4)
+  %call5 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %global2.0, i32 %add4)
   ret void
 }
 
@@ -62,15 +62,15 @@ if.then:                                          ; preds = %entry
 ; CHECK-LABEL: @hasclobber
 ; CHECK: %call3 = tail call i32 @_Z3gooi(i32 %i.0)
 ; CHECK-NEXT: %add4 = add nsw i32 %call3, 5
-; CHECK-NEXT: printf(i8* getelementptr inbounds ({{.*}}, i32 %global2.0, i32 %add4)
+; CHECK-NEXT: printf(ptr @.str, i32 %global2.0, i32 %add4)
 
 if.end:                                           ; preds = %if.then, %entry
   %i.0 = phi i32 [ 3, %if.then ], [ 2, %entry ]
   %global2.0 = phi i32 [ %add2, %if.then ], [ %add, %entry ]
-  store i32 5, i32* @global, align 4, !tbaa !2
+  store i32 5, ptr @global, align 4, !tbaa !2
   %call3 = tail call i32 @_Z3gooi(i32 %i.0)
   %add4 = add nsw i32 %call3, 5
-  %call5 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i32 %global2.0, i32 %add4)
+  %call5 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %global2.0, i32 %add4)
   ret void
 }
 

diff --git a/llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll b/llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll
index cfd0ec6a486e6..67fae22a59d15 100644
--- a/llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll
+++ b/llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll
@@ -17,13 +17,13 @@ entry:
   br label %bb0
 
 bb1:
-  %ptr1 = ptrtoint i32* %ptr2 to i64
-  %ptr2 = inttoptr i64 %ptr1 to i32*
+  %ptr1 = ptrtoint ptr %ptr2 to i64
+  %ptr2 = inttoptr i64 %ptr1 to ptr
   br i1 undef, label %bb0, label %bb1
 
 bb0:
-  %phi = phi i32* [ undef, %entry ], [ %ptr2, %bb1 ]
-  %load = load i32, i32* %phi
+  %phi = phi ptr [ undef, %entry ], [ %ptr2, %bb1 ]
+  %load = load i32, ptr %phi
   ret i32 %load
 }
 
@@ -32,12 +32,12 @@ entry:
   br label %bb0
 
 bb1:
-  %ptr1 = getelementptr i32, i32* %ptr2, i32 0
-  %ptr2 = getelementptr i32, i32* %ptr1, i32 0
+  %ptr1 = getelementptr i32, ptr %ptr2, i32 0
+  %ptr2 = getelementptr i32, ptr %ptr1, i32 0
   br i1 undef, label %bb0, label %bb1
 
 bb0:
-  %phi = phi i32* [ undef, %entry ], [ %ptr2, %bb1 ]
-  %load = load i32, i32* %phi
+  %phi = phi ptr [ undef, %entry ], [ %ptr2, %bb1 ]
+  %load = load i32, ptr %phi
   ret i32 %load
 }


        


More information about the llvm-commits mailing list