[llvm] 9261995 - [SCEV] Convert some tests to opaque pointers (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 15 01:01:41 PST 2022
Author: Nikita Popov
Date: 2022-12-15T10:00:45+01:00
New Revision: 92619956eb27ef08dd24045307593fc3d7f78db0
URL: https://github.com/llvm/llvm-project/commit/92619956eb27ef08dd24045307593fc3d7f78db0
DIFF: https://github.com/llvm/llvm-project/commit/92619956eb27ef08dd24045307593fc3d7f78db0.diff
LOG: [SCEV] Convert some tests to opaque pointers (NFC)
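For illustration, the mechanical rewrite applied to these tests looks like the minimal sketch below (placeholder names %base, %i, %gep, %val; not taken from any particular test). Loads, stores, and getelementptrs that previously spelled out a pointee type such as i32* now take a plain ptr operand, while the accessed or indexed type remains explicit as the instruction's first type operand:

  ; typed pointers (before)
  %gep = getelementptr i32, i32* %base, i32 %i
  %val = load i32, i32* %gep
  store i32 %val, i32* %gep

  ; opaque pointers (after)
  %gep = getelementptr i32, ptr %base, i32 %i
  %val = load i32, ptr %gep
  store i32 %val, ptr %gep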
Added:
Modified:
llvm/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll
llvm/test/Analysis/ScalarEvolution/2008-06-12-BinomialInt64.ll
llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
llvm/test/Analysis/ScalarEvolution/2008-11-18-LessThanOrEqual.ll
llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
llvm/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll
llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
llvm/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll
llvm/test/Analysis/ScalarEvolution/addrec-computed-during-addrec-calculation.ll
llvm/test/Analysis/ScalarEvolution/annotation-intrinsics.ll
llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll
llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll
llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll
llvm/test/Analysis/ScalarEvolution/cache_loop_exit_limit.ll
llvm/test/Analysis/ScalarEvolution/cycled_phis.ll
llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll
llvm/test/Analysis/ScalarEvolution/exponential-behavior.ll
llvm/test/Analysis/ScalarEvolution/flags-from-poison-dbg.ll
llvm/test/Analysis/ScalarEvolution/flattened-0.ll
llvm/test/Analysis/ScalarEvolution/guards.ll
llvm/test/Analysis/ScalarEvolution/implied-via-addition.ll
llvm/test/Analysis/ScalarEvolution/incorrect-exit-count.ll
llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll
llvm/test/Analysis/ScalarEvolution/inner-loop-by-latch-cond-unknown.ll
llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
llvm/test/Analysis/ScalarEvolution/lt-overflow.ll
llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info-rewrite-expressions.ll
llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll
llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-limit-by-wrapping.ll
llvm/test/Analysis/ScalarEvolution/max-expr-cache.ll
llvm/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll
llvm/test/Analysis/ScalarEvolution/max-trip-count.ll
llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll
llvm/test/Analysis/ScalarEvolution/ne-overflow.ll
llvm/test/Analysis/ScalarEvolution/no-wrap-symbolic-becount.ll
llvm/test/Analysis/ScalarEvolution/no-wrap-unknown-becount.ll
llvm/test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll
llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
llvm/test/Analysis/ScalarEvolution/nsw-offset.ll
llvm/test/Analysis/ScalarEvolution/nsw.ll
llvm/test/Analysis/ScalarEvolution/nw-sub-is-not-nw-add.ll
llvm/test/Analysis/ScalarEvolution/pointer-sign-bits.ll
llvm/test/Analysis/ScalarEvolution/pr18606.ll
llvm/test/Analysis/ScalarEvolution/pr22179.ll
llvm/test/Analysis/ScalarEvolution/pr22674.ll
llvm/test/Analysis/ScalarEvolution/pr24757.ll
llvm/test/Analysis/ScalarEvolution/pr25369.ll
llvm/test/Analysis/ScalarEvolution/pr35890.ll
llvm/test/Analysis/ScalarEvolution/pr3909.ll
llvm/test/Analysis/ScalarEvolution/pr46786.ll
llvm/test/Analysis/ScalarEvolution/pr51869-scalar-evolution-prove-implications-via-truncation.ll
llvm/test/Analysis/ScalarEvolution/predicated-trip-count.ll
llvm/test/Analysis/ScalarEvolution/range-signedness.ll
llvm/test/Analysis/ScalarEvolution/range_nw_flag.ll
llvm/test/Analysis/ScalarEvolution/ranges.ll
llvm/test/Analysis/ScalarEvolution/returned.ll
llvm/test/Analysis/ScalarEvolution/scalable-vector.ll
llvm/test/Analysis/ScalarEvolution/scev-dispositions.ll
llvm/test/Analysis/ScalarEvolution/scev-expander-existing-value-offset.ll
llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-vect.ll
llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
llvm/test/Analysis/ScalarEvolution/sext-inreg.ll
llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll
llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll
llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll
llvm/test/Analysis/ScalarEvolution/sext-zero.ll
llvm/test/Analysis/ScalarEvolution/shift-op.ll
llvm/test/Analysis/ScalarEvolution/sle.ll
llvm/test/Analysis/ScalarEvolution/smax-br-phi-idioms.ll
llvm/test/Analysis/ScalarEvolution/solve-quadratic-i1.ll
llvm/test/Analysis/ScalarEvolution/solve-quadratic-overflow.ll
llvm/test/Analysis/ScalarEvolution/strip-injective-zext.ll
llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll
llvm/test/Analysis/ScalarEvolution/trip-count-negative-stride.ll
llvm/test/Analysis/ScalarEvolution/trip-count-unknown-stride.ll
llvm/test/Analysis/ScalarEvolution/trip-count.ll
llvm/test/Analysis/ScalarEvolution/trip-count10.ll
llvm/test/Analysis/ScalarEvolution/trip-count11.ll
llvm/test/Analysis/ScalarEvolution/trip-count12.ll
llvm/test/Analysis/ScalarEvolution/trip-count14.ll
llvm/test/Analysis/ScalarEvolution/trip-count2.ll
llvm/test/Analysis/ScalarEvolution/trip-count3.ll
llvm/test/Analysis/ScalarEvolution/trip-count4.ll
llvm/test/Analysis/ScalarEvolution/trip-count5.ll
llvm/test/Analysis/ScalarEvolution/trip-count6.ll
llvm/test/Analysis/ScalarEvolution/trip-count7.ll
llvm/test/Analysis/ScalarEvolution/trip-count9.ll
llvm/test/Analysis/ScalarEvolution/tripmultiple_calculation.ll
llvm/test/Analysis/ScalarEvolution/trivial-phis.ll
llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll
llvm/test/Analysis/ScalarEvolution/truncate.ll
llvm/test/Analysis/ScalarEvolution/unknown_phis.ll
llvm/test/Analysis/ScalarEvolution/values-at-scopes-consistency.ll
llvm/test/Analysis/ScalarEvolution/widenable-condition.ll
llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
Removed:
################################################################################
diff --git a/llvm/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll b/llvm/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll
index f0808ceec26f3..6af105e3c28df 100644
--- a/llvm/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2007-07-15-NegativeStride.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -disable-output "-passes=print<scalar-evolution>" -scalar-evolution-max-iterations=0 2>&1 | FileCheck %s
; PR1533
-@array = weak global [101 x i32] zeroinitializer, align 32 ; <[100 x i32]*> [#uses=1]
+@array = weak global [101 x i32] zeroinitializer, align 32 ; <ptr> [#uses=1]
; CHECK: Loop %bb: backedge-taken count is 100
@@ -11,8 +11,8 @@ entry:
bb: ; preds = %bb, %entry
%i.01.0 = phi i32 [ 100, %entry ], [ %tmp4, %bb ] ; <i32> [#uses=2]
- %tmp1 = getelementptr [101 x i32], [101 x i32]* @array, i32 0, i32 %i.01.0 ; <i32*> [#uses=1]
- store i32 %x, i32* %tmp1
+ %tmp1 = getelementptr [101 x i32], ptr @array, i32 0, i32 %i.01.0 ; <ptr> [#uses=1]
+ store i32 %x, ptr %tmp1
%tmp4 = add i32 %i.01.0, -1 ; <i32> [#uses=2]
%tmp7 = icmp sgt i32 %tmp4, -1 ; <i1> [#uses=1]
br i1 %tmp7, label %bb, label %return
diff --git a/llvm/test/Analysis/ScalarEvolution/2008-06-12-BinomialInt64.ll b/llvm/test/Analysis/ScalarEvolution/2008-06-12-BinomialInt64.ll
index 9cb665b41853b..4f1aa448caca8 100644
--- a/llvm/test/Analysis/ScalarEvolution/2008-06-12-BinomialInt64.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2008-06-12-BinomialInt64.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -disable-output "-passes=print<scalar-evolution>" 2>/dev/null
; PR2433
-define i32 @main1(i32 %argc, i8** %argv) nounwind {
+define i32 @main1(i32 %argc, ptr %argv) nounwind {
entry:
br i1 false, label %bb10, label %bb23
@@ -20,9 +20,9 @@ bb23: ; preds = %bb10, %entry
ret i32 0
}
-define i32 @main2(i32 %argc, i8** %argv) {
+define i32 @main2(i32 %argc, ptr %argv) {
entry:
- %tmp8 = tail call i32 @atoi( i8* null ) nounwind readonly ; <i32> [#uses=1]
+ %tmp8 = tail call i32 @atoi( ptr null ) nounwind readonly ; <i32> [#uses=1]
br i1 false, label %bb9, label %bb21
bb9: ; preds = %bb9, %entry
@@ -40,4 +40,4 @@ bb21: ; preds = %bb9, %entry
ret i32 0
}
-declare i32 @atoi(i8*) nounwind readonly
+declare i32 @atoi(ptr) nounwind readonly
diff --git a/llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll b/llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
index 7af5b6107c17f..ce087c0b74abc 100644
--- a/llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
@@ -4,9 +4,9 @@
; CHECK: Printing analysis 'Scalar Evolution Analysis' for function 'foo'
; CHECK-NOT: smax
-@lut = common global [256 x i8] zeroinitializer, align 32 ; <[256 x i8]*> [#uses=1]
+@lut = common global [256 x i8] zeroinitializer, align 32 ; <ptr> [#uses=1]
-define void @foo(i32 %count, i32* %srcptr, i32* %dstptr) nounwind {
+define void @foo(i32 %count, ptr %srcptr, ptr %dstptr) nounwind {
entry:
icmp sgt i32 %count, 0 ; <i1>:0 [#uses=1]
br i1 %0, label %bb.nph, label %return
@@ -16,14 +16,14 @@ bb.nph: ; preds = %entry
bb: ; preds = %bb1, %bb.nph
%j.01 = phi i32 [ %8, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=1]
- load i32, i32* %srcptr, align 4 ; <i32>:1 [#uses=2]
+ load i32, ptr %srcptr, align 4 ; <i32>:1 [#uses=2]
and i32 %1, 255 ; <i32>:2 [#uses=1]
and i32 %1, -256 ; <i32>:3 [#uses=1]
- getelementptr [256 x i8], [256 x i8]* @lut, i32 0, i32 %2 ; <i8*>:4 [#uses=1]
- load i8, i8* %4, align 1 ; <i8>:5 [#uses=1]
+ getelementptr [256 x i8], ptr @lut, i32 0, i32 %2 ; <ptr>:4 [#uses=1]
+ load i8, ptr %4, align 1 ; <i8>:5 [#uses=1]
zext i8 %5 to i32 ; <i32>:6 [#uses=1]
or i32 %6, %3 ; <i32>:7 [#uses=1]
- store i32 %7, i32* %dstptr, align 4
+ store i32 %7, ptr %dstptr, align 4
add i32 %j.01, 1 ; <i32>:8 [#uses=2]
br label %bb1
diff --git a/llvm/test/Analysis/ScalarEvolution/2008-11-18-LessThanOrEqual.ll b/llvm/test/Analysis/ScalarEvolution/2008-11-18-LessThanOrEqual.ll
index 7150801284a42..a67963dbde5cb 100644
--- a/llvm/test/Analysis/ScalarEvolution/2008-11-18-LessThanOrEqual.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2008-11-18-LessThanOrEqual.ll
@@ -2,7 +2,7 @@
; CHECK: Loop %bb: backedge-taken count is (7 + (-1 * %argc))
-define i32 @main(i32 %argc, i8** %argv) nounwind {
+define i32 @main(i32 %argc, ptr %argv) nounwind {
entry:
%0 = icmp ugt i32 %argc, 7 ; <i1> [#uses=1]
br i1 %0, label %bb2, label %bb.nph
diff --git a/llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll b/llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
index d406ded0d7716..fabd37a269359 100644
--- a/llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
@@ -2,17 +2,17 @@
; CHECK: backedge-taken count is 255
-define i32 @foo(i32 %x, i32 %y, i32* %lam, i32* %alp) nounwind {
+define i32 @foo(i32 %x, i32 %y, ptr %lam, ptr %alp) nounwind {
bb1.thread:
br label %bb1
bb1: ; preds = %bb1, %bb1.thread
%indvar = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=4]
%i.0.reg2mem.0 = sub i32 255, %indvar ; <i32> [#uses=2]
- %0 = getelementptr i32, i32* %alp, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- %1 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
- %2 = getelementptr i32, i32* %lam, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- store i32 %1, i32* %2, align 4
+ %0 = getelementptr i32, ptr %alp, i32 %i.0.reg2mem.0 ; <ptr> [#uses=1]
+ %1 = load i32, ptr %0, align 4 ; <i32> [#uses=1]
+ %2 = getelementptr i32, ptr %lam, i32 %i.0.reg2mem.0 ; <ptr> [#uses=1]
+ store i32 %1, ptr %2, align 4
%3 = sub i32 254, %indvar ; <i32> [#uses=1]
%4 = icmp slt i32 %3, 0 ; <i1> [#uses=1]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
diff --git a/llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll b/llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
index e500490c26f4c..60263cdc14f1c 100644
--- a/llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
@@ -4,17 +4,17 @@
; CHECK: Printing analysis 'Scalar Evolution Analysis' for function 'func_15'
; CHECK-NOT: /u -1
-@g_16 = external global i16 ; <i16*> [#uses=3]
-@.str = external constant [4 x i8] ; <[4 x i8]*> [#uses=0]
+@g_16 = external global i16 ; <ptr> [#uses=3]
+@.str = external constant [4 x i8] ; <ptr> [#uses=0]
define void @func_15() nounwind {
entry:
- %0 = load i16, i16* @g_16, align 2 ; <i16> [#uses=1]
+ %0 = load i16, ptr @g_16, align 2 ; <i16> [#uses=1]
%1 = icmp sgt i16 %0, 0 ; <i1> [#uses=1]
br i1 %1, label %bb2, label %bb.nph
bb.nph: ; preds = %entry
- %g_16.promoted = load i16, i16* @g_16 ; <i16> [#uses=1]
+ %g_16.promoted = load i16, ptr @g_16 ; <i16> [#uses=1]
br label %bb
bb: ; preds = %bb1, %bb.nph
@@ -27,7 +27,7 @@ bb1: ; preds = %bb
br i1 %3, label %bb1.bb2_crit_edge, label %bb
bb1.bb2_crit_edge: ; preds = %bb1
- store i16 %2, i16* @g_16
+ store i16 %2, ptr @g_16
br label %bb2
bb2: ; preds = %bb1.bb2_crit_edge, %entry
@@ -39,5 +39,5 @@ return: ; preds = %bb2
declare i32 @main() nounwind
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind
diff --git a/llvm/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll b/llvm/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll
index 2d90556757f85..452ba1bd2edb7 100644
--- a/llvm/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2009-05-09-PointerEdgeCount.ll
@@ -10,19 +10,19 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
define void @_Z3foov() nounwind {
entry:
- %x = alloca %struct.NonPod, align 8 ; <%struct.NonPod*> [#uses=2]
- %0 = getelementptr %struct.NonPod, %struct.NonPod* %x, i32 0, i32 0 ; <[2 x %struct.Foo]*> [#uses=1]
- %1 = getelementptr [2 x %struct.Foo], [2 x %struct.Foo]* %0, i32 1, i32 0 ; <%struct.Foo*> [#uses=1]
+ %x = alloca %struct.NonPod, align 8 ; <ptr> [#uses=2]
+ %0 = getelementptr %struct.NonPod, ptr %x, i32 0, i32 0 ; <ptr> [#uses=1]
+ %1 = getelementptr [2 x %struct.Foo], ptr %0, i32 1, i32 0 ; <ptr> [#uses=1]
br label %bb1.i
bb1.i: ; preds = %bb2.i, %entry
- %.0.i = phi %struct.Foo* [ %1, %entry ], [ %4, %bb2.i ] ; <%struct.Foo*> [#uses=2]
- %2 = getelementptr %struct.NonPod, %struct.NonPod* %x, i32 0, i32 0, i32 0 ; <%struct.Foo*> [#uses=1]
- %3 = icmp eq %struct.Foo* %.0.i, %2 ; <i1> [#uses=1]
+ %.0.i = phi ptr [ %1, %entry ], [ %4, %bb2.i ] ; <ptr> [#uses=2]
+ %2 = getelementptr %struct.NonPod, ptr %x, i32 0, i32 0, i32 0 ; <ptr> [#uses=1]
+ %3 = icmp eq ptr %.0.i, %2 ; <i1> [#uses=1]
br i1 %3, label %_ZN6NonPodD1Ev.exit, label %bb2.i
bb2.i: ; preds = %bb1.i
- %4 = getelementptr %struct.Foo, %struct.Foo* %.0.i, i32 -1 ; <%struct.Foo*> [#uses=1]
+ %4 = getelementptr %struct.Foo, ptr %.0.i, i32 -1 ; <ptr> [#uses=1]
br label %bb1.i
_ZN6NonPodD1Ev.exit: ; preds = %bb1.i
diff --git a/llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll b/llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
index 42d8eac0233be..db7935d21e247 100644
--- a/llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
@@ -3,11 +3,11 @@
define void @test() {
entry:
- %0 = load i16, i16* undef, align 1
+ %0 = load i16, ptr undef, align 1
%1 = lshr i16 %0, 8
%2 = and i16 %1, 3
%3 = zext i16 %2 to i32
- %4 = load i8, i8* undef, align 1
+ %4 = load i8, ptr undef, align 1
%5 = lshr i8 %4, 4
%6 = and i8 %5, 1
%7 = zext i8 %6 to i32
diff --git a/llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll b/llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
index b78664e89923a..305b821029c03 100644
--- a/llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
+++ b/llvm/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
@@ -15,24 +15,24 @@ entry:
lbl_818: ; preds = %for.end, %entry
call void (...) @func_27()
- store i32 0, i32* @g_814, align 4
+ store i32 0, ptr @g_814, align 4
br label %for.cond
for.cond: ; preds = %for.body, %lbl_818
- %0 = load i32, i32* @g_814, align 4
+ %0 = load i32, ptr @g_814, align 4
%cmp = icmp sle i32 %0, 0
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %0 to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* getelementptr inbounds ([1 x [0 x i32]], [1 x [0 x i32]]* @g_244, i32 0, i64 0), i32 0, i64 %idxprom
- %1 = load i32, i32* %arrayidx, align 1
- store i32 %1, i32* @func_21_l_773, align 4
- store i32 1, i32* @g_814, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g_244, i32 0, i64 %idxprom
+ %1 = load i32, ptr %arrayidx, align 1
+ store i32 %1, ptr @func_21_l_773, align 4
+ store i32 1, ptr @g_814, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %2 = load i32, i32* @func_21_l_773, align 4
+ %2 = load i32, ptr @func_21_l_773, align 4
%tobool = icmp ne i32 %2, 0
br i1 %tobool, label %lbl_818, label %if.end
diff --git a/llvm/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll b/llvm/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll
index 55470c0278e50..e09ebea43083e 100644
--- a/llvm/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll
+++ b/llvm/test/Analysis/ScalarEvolution/SolveQuadraticEquation.ll
@@ -10,8 +10,8 @@ entry:
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
- store i32 123, i32* %tmp
+ %tmp = getelementptr [1000 x i32], ptr @A, i32 0, i32 %i.0 ; <ptr> [#uses=1]
+ store i32 123, ptr %tmp
%tmp2 = add i32 %i.0, 1 ; <i32> [#uses=1]
br label %bb3
diff --git a/llvm/test/Analysis/ScalarEvolution/addrec-computed-during-addrec-calculation.ll b/llvm/test/Analysis/ScalarEvolution/addrec-computed-during-addrec-calculation.ll
index fef5db9dcedc6..b03133ac6bea7 100644
--- a/llvm/test/Analysis/ScalarEvolution/addrec-computed-during-addrec-calculation.ll
+++ b/llvm/test/Analysis/ScalarEvolution/addrec-computed-during-addrec-calculation.ll
@@ -6,7 +6,7 @@
; inference, the exact SCEV calculated both times ends up being different,
; though both expressions are correct. Make sure we don't assert in this case.
-define void @test(i32* %p) {
+define void @test(ptr %p) {
; CHECK-LABEL: 'test'
; CHECK-NEXT: Classifying expressions for: @test
; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.latch ]
@@ -15,7 +15,7 @@ define void @test(i32* %p) {
; CHECK-NEXT: --> {%iv,+,1}<nsw><%loop2> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop2: Computable, %loop.header: Variant }
; CHECK-NEXT: %iv2.next = add i32 %iv2, 1
; CHECK-NEXT: --> {(1 + %iv),+,1}<nw><%loop2> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop2: Computable, %loop.header: Variant }
-; CHECK-NEXT: %v = load i32, i32* %p, align 4
+; CHECK-NEXT: %v = load i32, ptr %p, align 4
; CHECK-NEXT: --> %v U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop2: Variant, %loop.header: Variant }
; CHECK-NEXT: %iv2.ext = sext i32 %iv2 to i64
; CHECK-NEXT: --> (sext i32 {%iv,+,1}<nsw><%loop2> to i64) U: [-2147483648,2147483648) S: [-2147483648,2147483648) Exits: <<Unknown>> LoopDispositions: { %loop.header: Variant, %loop2: Computable, %loop3: Invariant }
@@ -51,7 +51,7 @@ loop.header:
loop2:
%iv2 = phi i32 [ %iv, %loop.header ], [ %iv2.next, %loop2 ]
%iv2.next = add i32 %iv2, 1
- %v = load i32, i32* %p
+ %v = load i32, ptr %p
%cmp = icmp slt i32 %iv2, %v
br i1 %cmp, label %loop2, label %loop2.end
diff --git a/llvm/test/Analysis/ScalarEvolution/annotation-intrinsics.ll b/llvm/test/Analysis/ScalarEvolution/annotation-intrinsics.ll
index 5593798d92c21..59313fb73ea40 100644
--- a/llvm/test/Analysis/ScalarEvolution/annotation-intrinsics.ll
+++ b/llvm/test/Analysis/ScalarEvolution/annotation-intrinsics.ll
@@ -1,28 +1,28 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt -disable-output "-passes=print<scalar-evolution>" < %s 2>&1 | FileCheck %s
-declare i64 @llvm.annotation.i64(i64, i8*, i8*, i32)
-declare i8* @llvm.ptr.annotation.p0i8(i8*, i8*, i8*, i32, i8*)
+declare i64 @llvm.annotation.i64(i64, ptr, ptr, i32)
+declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
define i64 @annotation(i64 %x) {
; CHECK-LABEL: 'annotation'
; CHECK-NEXT: Classifying expressions for: @annotation
-; CHECK-NEXT: %1 = tail call i64 @llvm.annotation.i64.p0i8(i64 %x, i8* null, i8* null, i32 0)
+; CHECK-NEXT: %1 = tail call i64 @llvm.annotation.i64.p0(i64 %x, ptr null, ptr null, i32 0)
; CHECK-NEXT: --> %x U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @annotation
;
- %1 = tail call i64 @llvm.annotation.i64(i64 %x, i8* null, i8* null, i32 0)
+ %1 = tail call i64 @llvm.annotation.i64(i64 %x, ptr null, ptr null, i32 0)
ret i64 %1
}
-define i8* @ptr_annotation(i8* %x) {
+define ptr @ptr_annotation(ptr %x) {
; CHECK-LABEL: 'ptr_annotation'
; CHECK-NEXT: Classifying expressions for: @ptr_annotation
-; CHECK-NEXT: %1 = call i8* @llvm.ptr.annotation.p0i8.p0i8(i8* %x, i8* null, i8* null, i32 0, i8* null)
+; CHECK-NEXT: %1 = call ptr @llvm.ptr.annotation.p0.p0(ptr %x, ptr null, ptr null, i32 0, ptr null)
; CHECK-NEXT: --> %x U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @ptr_annotation
;
- %1 = call i8* @llvm.ptr.annotation.p0i8(i8* %x, i8* null, i8* null, i32 0, i8* null)
- ret i8* %1
+ %1 = call ptr @llvm.ptr.annotation.p0(ptr %x, ptr null, ptr null, i32 0, ptr null)
+ ret ptr %1
}
diff --git a/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll b/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
index 3c8d17edc647a..c0e6681828deb 100644
--- a/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
+++ b/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
@@ -7,8 +7,8 @@ target triple = "x86_64-unknown-linux-gnu"
define i32 @test() {
entry:
- %0 = load i32*, i32** undef, align 8 ; <i32*> [#uses=1]
- %1 = ptrtoint i32* %0 to i64 ; <i64> [#uses=1]
+ %0 = load ptr, ptr undef, align 8 ; <ptr> [#uses=1]
+ %1 = ptrtoint ptr %0 to i64 ; <i64> [#uses=1]
%2 = sub i64 undef, %1 ; <i64> [#uses=1]
%3 = lshr i64 %2, 3 ; <i64> [#uses=1]
%4 = trunc i64 %3 to i32 ; <i32> [#uses=2]
diff --git a/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll b/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll
index a67c678a40e04..49b8c88e5e54d 100644
--- a/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll
+++ b/llvm/test/Analysis/ScalarEvolution/avoid-infinite-recursion-1.ll
@@ -8,32 +8,32 @@ module asm ".ident\09\22$FreeBSD: head/sys/kern/vfs_subr.c 195285 2009-07-02 14:
module asm ".section set_pcpu, \22aw\22, @progbits"
module asm ".previous"
%0 = type <{ [40 x i8] }> ; type %0
- %1 = type <{ %struct.vm_object*, %struct.vm_object** }> ; type %1
- %2 = type <{ %struct.vm_object* }> ; type %2
- %3 = type <{ %struct.vm_page*, %struct.vm_page** }> ; type %3
- %4 = type <{ %struct.pv_entry*, %struct.pv_entry** }> ; type %4
- %5 = type <{ %struct.vm_reserv* }> ; type %5
- %6 = type <{ %struct.bufobj*, %struct.bufobj** }> ; type %6
- %7 = type <{ %struct.proc*, %struct.proc** }> ; type %7
- %8 = type <{ %struct.thread*, %struct.thread** }> ; type %8
- %9 = type <{ %struct.prison*, %struct.prison** }> ; type %9
- %10 = type <{ %struct.prison* }> ; type %10
- %11 = type <{ %struct.task* }> ; type %11
- %12 = type <{ %struct.osd*, %struct.osd** }> ; type %12
- %13 = type <{ %struct.proc* }> ; type %13
- %14 = type <{ %struct.ksiginfo*, %struct.ksiginfo** }> ; type %14
- %15 = type <{ %struct.pv_chunk*, %struct.pv_chunk** }> ; type %15
- %16 = type <{ %struct.pgrp*, %struct.pgrp** }> ; type %16
- %17 = type <{ %struct.knote*, %struct.knote** }> ; type %17
- %18 = type <{ %struct.ktr_request*, %struct.ktr_request** }> ; type %18
- %19 = type <{ %struct.mqueue_notifier* }> ; type %19
- %20 = type <{ %struct.turnstile* }> ; type %20
- %21 = type <{ %struct.namecache* }> ; type %21
- %22 = type <{ %struct.namecache*, %struct.namecache** }> ; type %22
- %23 = type <{ %struct.lockf*, %struct.lockf** }> ; type %23
- %24 = type <{ %struct.lockf_entry*, %struct.lockf_entry** }> ; type %24
- %25 = type <{ %struct.lockf_edge*, %struct.lockf_edge** }> ; type %25
- %struct.__siginfo = type <{ i32, i32, i32, i32, i32, i32, i8*, %union.sigval, %0 }>
+ %1 = type <{ ptr, ptr }> ; type %1
+ %2 = type <{ ptr }> ; type %2
+ %3 = type <{ ptr, ptr }> ; type %3
+ %4 = type <{ ptr, ptr }> ; type %4
+ %5 = type <{ ptr }> ; type %5
+ %6 = type <{ ptr, ptr }> ; type %6
+ %7 = type <{ ptr, ptr }> ; type %7
+ %8 = type <{ ptr, ptr }> ; type %8
+ %9 = type <{ ptr, ptr }> ; type %9
+ %10 = type <{ ptr }> ; type %10
+ %11 = type <{ ptr }> ; type %11
+ %12 = type <{ ptr, ptr }> ; type %12
+ %13 = type <{ ptr }> ; type %13
+ %14 = type <{ ptr, ptr }> ; type %14
+ %15 = type <{ ptr, ptr }> ; type %15
+ %16 = type <{ ptr, ptr }> ; type %16
+ %17 = type <{ ptr, ptr }> ; type %17
+ %18 = type <{ ptr, ptr }> ; type %18
+ %19 = type <{ ptr }> ; type %19
+ %20 = type <{ ptr }> ; type %20
+ %21 = type <{ ptr }> ; type %21
+ %22 = type <{ ptr, ptr }> ; type %22
+ %23 = type <{ ptr, ptr }> ; type %23
+ %24 = type <{ ptr, ptr }> ; type %24
+ %25 = type <{ ptr, ptr }> ; type %25
+ %struct.__siginfo = type <{ i32, i32, i32, i32, i32, i32, ptr, %union.sigval, %0 }>
%struct.__sigset = type <{ [4 x i32] }>
%struct.acl = type <{ i32, i32, [4 x i32], [254 x %struct.acl_entry] }>
%struct.acl_entry = type <{ i32, i32, i32, i16, i16 }>
@@ -41,173 +41,173 @@ module asm ".previous"
%struct.au_tid_addr = type <{ i32, i32, [4 x i32] }>
%struct.auditinfo_addr = type <{ i32, %struct.au_mask, %struct.au_tid_addr, i32, i64 }>
%struct.bintime = type <{ i64, i64 }>
- %struct.buf = type <{ %struct.bufobj*, i64, i8*, i8*, i32, i8, i8, i8, i8, i64, i64, void (%struct.buf*)*, i64, i64, %struct.buflists, %struct.buf*, %struct.buf*, i32, i8, i8, i8, i8, %struct.buflists, i16, i8, i8, i32, i8, i8, i8, i8, i8, i8, i8, i8, %struct.lock, i64, i64, i8*, i32, i8, i8, i8, i8, i64, %struct.vnode*, i32, i32, %struct.ucred*, %struct.ucred*, i8*, %union.pager_info, i8, i8, i8, i8, %union.anon, [32 x %struct.vm_page*], i32, i8, i8, i8, i8, %struct.workhead, i8*, i8*, i8*, i32, i8, i8, i8, i8 }>
- %struct.buf_ops = type <{ i8*, i32 (%struct.buf*)*, void (%struct.bufobj*, %struct.buf*)*, i32 (%struct.bufobj*, i32)*, void (%struct.bufobj*, %struct.buf*)* }>
- %struct.buflists = type <{ %struct.buf*, %struct.buf** }>
- %struct.bufobj = type <{ %struct.mtx, %struct.bufv, %struct.bufv, i64, i32, i8, i8, i8, i8, %struct.buf_ops*, i32, i8, i8, i8, i8, %struct.vm_object*, %6, i8*, %struct.vnode* }>
- %struct.bufv = type <{ %struct.buflists, %struct.buf*, i32, i8, i8, i8, i8 }>
- %struct.callout = type <{ %union.anon, i32, i8, i8, i8, i8, i8*, void (i8*)*, %struct.lock_object*, i32, i32 }>
+ %struct.buf = type <{ ptr, i64, ptr, ptr, i32, i8, i8, i8, i8, i64, i64, ptr, i64, i64, %struct.buflists, ptr, ptr, i32, i8, i8, i8, i8, %struct.buflists, i16, i8, i8, i32, i8, i8, i8, i8, i8, i8, i8, i8, %struct.lock, i64, i64, ptr, i32, i8, i8, i8, i8, i64, ptr, i32, i32, ptr, ptr, ptr, %union.pager_info, i8, i8, i8, i8, %union.anon, [32 x ptr], i32, i8, i8, i8, i8, %struct.workhead, ptr, ptr, ptr, i32, i8, i8, i8, i8 }>
+ %struct.buf_ops = type <{ ptr, ptr, ptr, ptr, ptr }>
+ %struct.buflists = type <{ ptr, ptr }>
+ %struct.bufobj = type <{ %struct.mtx, %struct.bufv, %struct.bufv, i64, i32, i8, i8, i8, i8, ptr, i32, i8, i8, i8, i8, ptr, %6, ptr, ptr }>
+ %struct.bufv = type <{ %struct.buflists, ptr, i32, i8, i8, i8, i8 }>
+ %struct.callout = type <{ %union.anon, i32, i8, i8, i8, i8, ptr, ptr, ptr, i32, i32 }>
%struct.cdev_privdata = type opaque
- %struct.cluster_save = type <{ i64, i64, i8*, i32, i8, i8, i8, i8, %struct.buf** }>
- %struct.componentname = type <{ i64, i64, %struct.thread*, %struct.ucred*, i32, i8, i8, i8, i8, i8*, i8*, i64, i64 }>
+ %struct.cluster_save = type <{ i64, i64, ptr, i32, i8, i8, i8, i8, ptr }>
+ %struct.componentname = type <{ i64, i64, ptr, ptr, i32, i8, i8, i8, i8, ptr, ptr, i64, i64 }>
%struct.cpuset = type opaque
- %struct.cv = type <{ i8*, i32, i8, i8, i8, i8 }>
+ %struct.cv = type <{ ptr, i32, i8, i8, i8, i8 }>
%struct.fid = type <{ i16, i16, [16 x i8] }>
- %struct.file = type <{ i8*, %struct.fileops*, %struct.ucred*, %struct.vnode*, i16, i16, i32, i32, i32, i64, %struct.cdev_privdata*, i64, i8* }>
+ %struct.file = type <{ ptr, ptr, ptr, ptr, i16, i16, i32, i32, i32, i64, ptr, i64, ptr }>
%struct.filedesc = type opaque
%struct.filedesc_to_leader = type opaque
- %struct.fileops = type <{ i32 (%struct.file*, %struct.uio*, %struct.ucred*, i32, %struct.thread*)*, i32 (%struct.file*, %struct.uio*, %struct.ucred*, i32, %struct.thread*)*, i32 (%struct.file*, i64, %struct.ucred*, %struct.thread*)*, i32 (%struct.file*, i64, i8*, %struct.ucred*, %struct.thread*)*, i32 (%struct.file*, i32, %struct.ucred*, %struct.thread*)*, i32 (%struct.file*, %struct.knote*)*, i32 (%struct.file*, %struct.stat*, %struct.ucred*, %struct.thread*)*, i32 (%struct.file*, %struct.thread*)*, i32, i8, i8, i8, i8 }>
- %struct.filterops = type <{ i32, i8, i8, i8, i8, i32 (%struct.knote*)*, void (%struct.knote*)*, i32 (%struct.knote*, i64)* }>
+ %struct.fileops = type <{ ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i8, i8, i8, i8 }>
+ %struct.filterops = type <{ i32, i8, i8, i8, i8, ptr, ptr, ptr }>
%struct.flock = type <{ i64, i64, i32, i16, i16, i32, i8, i8, i8, i8 }>
- %struct.freelst = type <{ %struct.vnode*, %struct.vnode** }>
+ %struct.freelst = type <{ ptr, ptr }>
%struct.fsid = type <{ [2 x i32] }>
%struct.in6_addr = type opaque
%struct.in_addr = type opaque
%struct.inode = type opaque
- %struct.iovec = type <{ i8*, i64 }>
+ %struct.iovec = type <{ ptr, i64 }>
%struct.itimers = type opaque
%struct.itimerval = type <{ %struct.bintime, %struct.bintime }>
%struct.kaioinfo = type opaque
%struct.kaudit_record = type opaque
%struct.kdtrace_proc = type opaque
%struct.kdtrace_thread = type opaque
- %struct.kevent = type <{ i64, i16, i16, i32, i64, i8* }>
- %struct.klist = type <{ %struct.knote* }>
- %struct.knlist = type <{ %struct.klist, void (i8*)*, void (i8*)*, void (i8*)*, void (i8*)*, i8* }>
- %struct.knote = type <{ %struct.klist, %struct.klist, %struct.knlist*, %17, %struct.kqueue*, %struct.kevent, i32, i32, i64, %union.sigval, %struct.filterops*, i8* }>
+ %struct.kevent = type <{ i64, i16, i16, i32, i64, ptr }>
+ %struct.klist = type <{ ptr }>
+ %struct.knlist = type <{ %struct.klist, ptr, ptr, ptr, ptr, ptr }>
+ %struct.knote = type <{ %struct.klist, %struct.klist, ptr, %17, ptr, %struct.kevent, i32, i32, i64, %union.sigval, ptr, ptr }>
%struct.kqueue = type opaque
- %struct.ksiginfo = type <{ %14, %struct.__siginfo, i32, i8, i8, i8, i8, %struct.sigqueue* }>
+ %struct.ksiginfo = type <{ %14, %struct.__siginfo, i32, i8, i8, i8, i8, ptr }>
%struct.ktr_request = type opaque
%struct.label = type opaque
%struct.lock = type <{ %struct.lock_object, i64, i32, i32 }>
%struct.lock_list_entry = type opaque
- %struct.lock_object = type <{ i8*, i32, i32, %struct.witness* }>
+ %struct.lock_object = type <{ ptr, i32, i32, ptr }>
%struct.lock_owner = type opaque
%struct.lock_profile_object = type opaque
%struct.lockf = type <{ %23, %struct.mtx, %struct.lockf_entry_list, %struct.lockf_entry_list, i32, i8, i8, i8, i8 }>
- %struct.lockf_edge = type <{ %25, %25, %struct.lockf_entry*, %struct.lockf_entry* }>
- %struct.lockf_edge_list = type <{ %struct.lockf_edge* }>
- %struct.lockf_entry = type <{ i16, i16, i8, i8, i8, i8, i64, i64, %struct.lock_owner*, %struct.vnode*, %struct.inode*, %struct.task*, %24, %struct.lockf_edge_list, %struct.lockf_edge_list, i32, i8, i8, i8, i8 }>
- %struct.lockf_entry_list = type <{ %struct.lockf_entry* }>
- %struct.lpohead = type <{ %struct.lock_profile_object* }>
+ %struct.lockf_edge = type <{ %25, %25, ptr, ptr }>
+ %struct.lockf_edge_list = type <{ ptr }>
+ %struct.lockf_entry = type <{ i16, i16, i8, i8, i8, i8, i64, i64, ptr, ptr, ptr, ptr, %24, %struct.lockf_edge_list, %struct.lockf_edge_list, i32, i8, i8, i8, i8 }>
+ %struct.lockf_entry_list = type <{ ptr }>
+ %struct.lpohead = type <{ ptr }>
%struct.md_page = type <{ %4 }>
- %struct.mdproc = type <{ %struct.cv*, %struct.system_segment_descriptor }>
+ %struct.mdproc = type <{ ptr, %struct.system_segment_descriptor }>
%struct.mdthread = type <{ i32, i8, i8, i8, i8, i64 }>
%struct.mntarg = type opaque
- %struct.mntlist = type <{ %struct.mount*, %struct.mount** }>
- %struct.mount = type <{ %struct.mtx, i32, i8, i8, i8, i8, %struct.mntlist, %struct.vfsops*, %struct.vfsconf*, %struct.vnode*, %struct.vnode*, i32, i8, i8, i8, i8, %struct.freelst, i32, i32, i32, i32, i32, i32, %struct.vfsoptlist*, %struct.vfsoptlist*, i32, i8, i8, i8, i8, %struct.statfs, %struct.ucred*, i8*, i64, i32, i8, i8, i8, i8, %struct.netexport*, %struct.label*, i32, i32, i32, i32, %struct.thread*, i8*, %struct.lock }>
+ %struct.mntlist = type <{ ptr, ptr }>
+ %struct.mount = type <{ %struct.mtx, i32, i8, i8, i8, i8, %struct.mntlist, ptr, ptr, ptr, ptr, i32, i8, i8, i8, i8, %struct.freelst, i32, i32, i32, i32, i32, i32, ptr, ptr, i32, i8, i8, i8, i8, %struct.statfs, ptr, ptr, i64, i32, i8, i8, i8, i8, ptr, ptr, i32, i32, i32, i32, ptr, ptr, %struct.lock }>
%struct.mqueue_notifier = type opaque
%struct.mtx = type <{ %struct.lock_object, i64 }>
%struct.namecache = type opaque
%struct.netexport = type opaque
%struct.nlminfo = type opaque
- %struct.osd = type <{ i32, i8, i8, i8, i8, i8**, %12 }>
+ %struct.osd = type <{ i32, i8, i8, i8, i8, ptr, %12 }>
%struct.p_sched = type opaque
%struct.pargs = type <{ i32, i32, [1 x i8], i8, i8, i8 }>
%struct.pcb = type opaque
- %struct.pgrp = type <{ %16, %13, %struct.session*, %struct.sigiolst, i32, i32, %struct.mtx }>
+ %struct.pgrp = type <{ %16, %13, ptr, %struct.sigiolst, i32, i32, %struct.mtx }>
%struct.plimit = type opaque
- %struct.pmap = type <{ %struct.mtx, i64*, %15, i32, i8, i8, i8, i8, %struct.bintime, %struct.vm_page* }>
- %struct.prison = type <{ %9, i32, i32, i32, i32, %10, %9, %struct.prison*, %struct.mtx, %struct.task, %struct.osd, %struct.cpuset*, %struct.vnet*, %struct.vnode*, i32, i32, %struct.in_addr*, %struct.in6_addr*, [4 x i8*], i32, i32, i32, i32, i32, [5 x i32], i64, [256 x i8], [1024 x i8], [256 x i8], [256 x i8], [64 x i8] }>
- %struct.proc = type <{ %7, %8, %struct.mtx, %struct.ucred*, %struct.filedesc*, %struct.filedesc_to_leader*, %struct.pstats*, %struct.plimit*, %struct.callout, %struct.sigacts*, i32, i32, i32, i8, i8, i8, i8, %7, %7, %struct.proc*, %7, %13, %struct.mtx, %struct.ksiginfo*, %struct.sigqueue, i32, i8, i8, i8, i8, %struct.vmspace*, i32, i8, i8, i8, i8, %struct.itimerval, %struct.rusage, %struct.rusage_ext, %struct.rusage_ext, i32, i32, i32, i8, i8, i8, i8, %struct.vnode*, %struct.ucred*, %struct.vnode*, i32, i8, i8, i8, i8, %struct.sigiolst, i32, i32, i64, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, %struct.nlminfo*, %struct.kaioinfo*, %struct.thread*, i32, i8, i8, i8, i8, %struct.thread*, i32, i32, %struct.itimers*, i32, i32, [20 x i8], i8, i8, i8, i8, %struct.pgrp*, %struct.sysentvec*, %struct.pargs*, i64, i8, i8, i8, i8, i32, i16, i8, i8, i8, i8, i8, i8, %struct.knlist, i32, i8, i8, i8, i8, %struct.mdproc, %struct.callout, i16, i8, i8, i8, i8, i8, i8, %struct.proc*, %struct.proc*, i8*, %struct.label*, %struct.p_sched*, %18, %19, %struct.kdtrace_proc*, %struct.cv }>
+ %struct.pmap = type <{ %struct.mtx, ptr, %15, i32, i8, i8, i8, i8, %struct.bintime, ptr }>
+ %struct.prison = type <{ %9, i32, i32, i32, i32, %10, %9, ptr, %struct.mtx, %struct.task, %struct.osd, ptr, ptr, ptr, i32, i32, ptr, ptr, [4 x ptr], i32, i32, i32, i32, i32, [5 x i32], i64, [256 x i8], [1024 x i8], [256 x i8], [256 x i8], [64 x i8] }>
+ %struct.proc = type <{ %7, %8, %struct.mtx, ptr, ptr, ptr, ptr, ptr, %struct.callout, ptr, i32, i32, i32, i8, i8, i8, i8, %7, %7, ptr, %7, %13, %struct.mtx, ptr, %struct.sigqueue, i32, i8, i8, i8, i8, ptr, i32, i8, i8, i8, i8, %struct.itimerval, %struct.rusage, %struct.rusage_ext, %struct.rusage_ext, i32, i32, i32, i8, i8, i8, i8, ptr, ptr, ptr, i32, i8, i8, i8, i8, %struct.sigiolst, i32, i32, i64, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, ptr, ptr, ptr, i32, i8, i8, i8, i8, ptr, i32, i32, ptr, i32, i32, [20 x i8], i8, i8, i8, i8, ptr, ptr, ptr, i64, i8, i8, i8, i8, i32, i16, i8, i8, i8, i8, i8, i8, %struct.knlist, i32, i8, i8, i8, i8, %struct.mdproc, %struct.callout, i16, i8, i8, i8, i8, i8, i8, ptr, ptr, ptr, ptr, ptr, %18, %19, ptr, %struct.cv }>
%struct.pstats = type opaque
- %struct.pv_chunk = type <{ %struct.pmap*, %15, [3 x i64], [2 x i64], [168 x %struct.pv_entry] }>
+ %struct.pv_chunk = type <{ ptr, %15, [3 x i64], [2 x i64], [168 x %struct.pv_entry] }>
%struct.pv_entry = type <{ i64, %4 }>
%struct.rusage = type <{ %struct.bintime, %struct.bintime, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 }>
%struct.rusage_ext = type <{ i64, i64, i64, i64, i64, i64, i64 }>
%struct.selfd = type opaque
- %struct.selfdlist = type <{ %struct.selfd*, %struct.selfd** }>
- %struct.selinfo = type <{ %struct.selfdlist, %struct.knlist, %struct.mtx* }>
+ %struct.selfdlist = type <{ ptr, ptr }>
+ %struct.selinfo = type <{ %struct.selfdlist, %struct.knlist, ptr }>
%struct.seltd = type opaque
- %struct.session = type <{ i32, i8, i8, i8, i8, %struct.proc*, %struct.vnode*, %struct.tty*, i32, [24 x i8], i8, i8, i8, i8, %struct.mtx }>
+ %struct.session = type <{ i32, i8, i8, i8, i8, ptr, ptr, ptr, i32, [24 x i8], i8, i8, i8, i8, %struct.mtx }>
%struct.shmmap_state = type opaque
- %struct.sigacts = type <{ [128 x void (i32)*], [128 x %struct.__sigset], %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, i32, i32, %struct.mtx }>
- %struct.sigaltstack = type <{ i8*, i64, i32, i8, i8, i8, i8 }>
- %struct.sigio = type <{ %union.sigval, %struct.sigiolst, %struct.sigio**, %struct.ucred*, i32, i8, i8, i8, i8 }>
- %struct.sigiolst = type <{ %struct.sigio* }>
- %struct.sigqueue = type <{ %struct.__sigset, %struct.__sigset, %14, %struct.proc*, i32, i8, i8, i8, i8 }>
+ %struct.sigacts = type <{ [128 x ptr], [128 x %struct.__sigset], %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, %struct.__sigset, i32, i32, %struct.mtx }>
+ %struct.sigaltstack = type <{ ptr, i64, i32, i8, i8, i8, i8 }>
+ %struct.sigio = type <{ %union.sigval, %struct.sigiolst, ptr, ptr, i32, i8, i8, i8, i8 }>
+ %struct.sigiolst = type <{ ptr }>
+ %struct.sigqueue = type <{ %struct.__sigset, %struct.__sigset, %14, ptr, i32, i8, i8, i8, i8 }>
%struct.sleepqueue = type opaque
%struct.sockaddr = type opaque
%struct.stat = type <{ i32, i32, i16, i16, i32, i32, i32, %struct.bintime, %struct.bintime, %struct.bintime, i64, i64, i32, i32, i32, i32, %struct.bintime }>
%struct.statfs = type <{ i32, i32, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, [10 x i64], i32, i32, %struct.fsid, [80 x i8], [16 x i8], [88 x i8], [88 x i8] }>
- %struct.sysctl_req = type <{ %struct.thread*, i32, i8, i8, i8, i8, i8*, i64, i64, i32 (%struct.sysctl_req*, i8*, i64)*, i8*, i64, i64, i32 (%struct.sysctl_req*, i8*, i64)*, i64, i32, i8, i8, i8, i8 }>
+ %struct.sysctl_req = type <{ ptr, i32, i8, i8, i8, i8, ptr, i64, i64, ptr, ptr, i64, i64, ptr, i64, i32, i8, i8, i8, i8 }>
%struct.sysentvec = type opaque
%struct.system_segment_descriptor = type <{ i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }>
- %struct.task = type <{ %11, i16, i16, i8, i8, i8, i8, void (i8*, i32)*, i8* }>
+ %struct.task = type <{ %11, i16, i16, i8, i8, i8, i8, ptr, ptr }>
%struct.td_sched = type opaque
- %struct.thread = type <{ %struct.mtx*, %struct.proc*, %8, %8, %8, %8, %struct.cpuset*, %struct.seltd*, %struct.sleepqueue*, %struct.turnstile*, %struct.umtx_q*, i32, i8, i8, i8, i8, %struct.sigqueue, i32, i32, i32, i32, i32, i8, i8, i8, i8, i8*, i8*, i8, i8, i8, i8, i16, i16, i16, i8, i8, i8, i8, i8, i8, %struct.turnstile*, i8*, %20, %struct.lock_list_entry*, i32, i32, %struct.ucred*, i32, i32, %struct.rusage, i64, i64, i32, i32, i32, i32, i32, %struct.__sigset, %struct.__sigset, i32, %struct.sigaltstack, i32, i8, i8, i8, i8, i64, i32, [20 x i8], %struct.file*, i32, i32, %struct.osd, i8, i8, i8, i8, i8, i8, i8, i8, %struct.pcb*, i32, i8, i8, i8, i8, [2 x i64], %struct.callout, %struct.trapframe*, %struct.vm_object*, i64, i32, i8, i8, i8, i8, %struct.vm_object*, i64, i32, i32, %struct.mdthread, %struct.td_sched*, %struct.kaudit_record*, i32, i8, i8, i8, i8, [2 x %struct.lpohead], %struct.kdtrace_thread*, i32, i8, i8, i8, i8, %struct.vnet*, i8* }>
+ %struct.thread = type <{ ptr, ptr, %8, %8, %8, %8, ptr, ptr, ptr, ptr, ptr, i32, i8, i8, i8, i8, %struct.sigqueue, i32, i32, i32, i32, i32, i8, i8, i8, i8, ptr, ptr, i8, i8, i8, i8, i16, i16, i16, i8, i8, i8, i8, i8, i8, ptr, ptr, %20, ptr, i32, i32, ptr, i32, i32, %struct.rusage, i64, i64, i32, i32, i32, i32, i32, %struct.__sigset, %struct.__sigset, i32, %struct.sigaltstack, i32, i8, i8, i8, i8, i64, i32, [20 x i8], ptr, i32, i32, %struct.osd, i8, i8, i8, i8, i8, i8, i8, i8, ptr, i32, i8, i8, i8, i8, [2 x i64], %struct.callout, ptr, ptr, i64, i32, i8, i8, i8, i8, ptr, i64, i32, i32, %struct.mdthread, ptr, ptr, i32, i8, i8, i8, i8, [2 x %struct.lpohead], ptr, i32, i8, i8, i8, i8, ptr, ptr }>
%struct.trapframe = type <{ i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i32, i16, i16, i64, i32, i16, i16, i64, i64, i64, i64, i64, i64 }>
%struct.tty = type opaque
%struct.turnstile = type opaque
- %struct.ucred = type <{ i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8, %struct.uidinfo*, %struct.uidinfo*, %struct.prison*, %struct.vimage*, i32, i8, i8, i8, i8, [2 x i8*], %struct.label*, %struct.auditinfo_addr, i32*, i32, i8, i8, i8, i8 }>
+ %struct.ucred = type <{ i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8, ptr, ptr, ptr, ptr, i32, i8, i8, i8, i8, [2 x ptr], ptr, %struct.auditinfo_addr, ptr, i32, i8, i8, i8, i8 }>
%struct.uidinfo = type opaque
- %struct.uio = type <{ %struct.iovec*, i32, i8, i8, i8, i8, i64, i64, i32, i32, %struct.thread* }>
+ %struct.uio = type <{ ptr, i32, i8, i8, i8, i8, i64, i64, i32, i32, ptr }>
%struct.umtx_q = type opaque
%struct.vattr = type <{ i32, i16, i16, i32, i32, i32, i8, i8, i8, i8, i64, i64, i64, %struct.bintime, %struct.bintime, %struct.bintime, %struct.bintime, i64, i64, i32, i8, i8, i8, i8, i64, i64, i32, i8, i8, i8, i8, i64 }>
- %struct.vfsconf = type <{ i32, [16 x i8], i8, i8, i8, i8, %struct.vfsops*, i32, i32, i32, i8, i8, i8, i8, %struct.vfsoptdecl*, %struct.vfsconfhead }>
- %struct.vfsconfhead = type <{ %struct.vfsconf*, %struct.vfsconf** }>
- %struct.vfsops = type <{ i32 (%struct.mount*)*, i32 (%struct.mntarg*, i8*, i32)*, i32 (%struct.mount*, i32)*, i32 (%struct.mount*, i32, %struct.vnode**)*, i32 (%struct.mount*, i32, i32, i8*)*, i32 (%struct.mount*, %struct.statfs*)*, i32 (%struct.mount*, i32)*, i32 (%struct.mount*, i32, i32, %struct.vnode**)*, i32 (%struct.mount*, %struct.fid*, %struct.vnode**)*, i32 (%struct.mount*, %struct.sockaddr*, i32*, %struct.ucred**, i32*, i32**)*, i32 (%struct.vfsconf*)*, i32 (%struct.vfsconf*)*, i32 (%struct.mount*, i32, %struct.vnode*, i32, i8*)*, i32 (%struct.mount*, i32, %struct.sysctl_req*)*, void (%struct.mount*)* }>
- %struct.vfsopt = type <{ %struct.vfsoptlist, i8*, i8*, i32, i32, i32, i8, i8, i8, i8 }>
+ %struct.vfsconf = type <{ i32, [16 x i8], i8, i8, i8, i8, ptr, i32, i32, i32, i8, i8, i8, i8, ptr, %struct.vfsconfhead }>
+ %struct.vfsconfhead = type <{ ptr, ptr }>
+ %struct.vfsops = type <{ ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }>
+ %struct.vfsopt = type <{ %struct.vfsoptlist, ptr, ptr, i32, i32, i32, i8, i8, i8, i8 }>
%struct.vfsoptdecl = type opaque
- %struct.vfsoptlist = type <{ %struct.vfsopt*, %struct.vfsopt** }>
+ %struct.vfsoptlist = type <{ ptr, ptr }>
%struct.vimage = type opaque
- %struct.vm_map = type <{ %struct.vm_map_entry, %struct.mtx, %struct.mtx, i32, i8, i8, i8, i8, i64, i32, i8, i8, i8, i8, %struct.vm_map_entry*, %struct.pmap*, %struct.vm_map_entry* }>
- %struct.vm_map_entry = type <{ %struct.vm_map_entry*, %struct.vm_map_entry*, %struct.vm_map_entry*, %struct.vm_map_entry*, i64, i64, i64, i64, i64, %union.sigval, i64, i32, i8, i8, i8, i8, i32, i8, i8, i8, i8, i64, %struct.uidinfo* }>
- %struct.vm_object = type <{ %struct.mtx, %1, %2, %1, %3, %struct.vm_page*, i64, i32, i32, i32, i8, i8, i16, i16, i16, i32, %struct.vm_object*, i64, %1, %5, %struct.vm_page*, i8*, %union.anon, %struct.uidinfo*, i64 }>
- %struct.vm_page = type <{ %3, %3, %struct.vm_page*, %struct.vm_page*, %struct.vm_object*, i64, i64, %struct.md_page, i8, i8, i16, i8, i8, i16, i32, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8 }>
+ %struct.vm_map = type <{ %struct.vm_map_entry, %struct.mtx, %struct.mtx, i32, i8, i8, i8, i8, i64, i32, i8, i8, i8, i8, ptr, ptr, ptr }>
+ %struct.vm_map_entry = type <{ ptr, ptr, ptr, ptr, i64, i64, i64, i64, i64, %union.sigval, i64, i32, i8, i8, i8, i8, i32, i8, i8, i8, i8, i64, ptr }>
+ %struct.vm_object = type <{ %struct.mtx, %1, %2, %1, %3, ptr, i64, i32, i32, i32, i8, i8, i16, i16, i16, i32, ptr, i64, %1, %5, ptr, ptr, %union.anon, ptr, i64 }>
+ %struct.vm_page = type <{ %3, %3, ptr, ptr, ptr, i64, i64, %struct.md_page, i8, i8, i16, i8, i8, i16, i32, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8 }>
%struct.vm_reserv = type opaque
- %struct.vmspace = type <{ %struct.vm_map, %struct.shmmap_state*, i64, i64, i64, i64, i8*, i8*, i8*, i32, i8, i8, i8, i8, %struct.pmap }>
+ %struct.vmspace = type <{ %struct.vm_map, ptr, i64, i64, i64, i64, ptr, ptr, ptr, i32, i8, i8, i8, i8, %struct.pmap }>
%struct.vnet = type opaque
- %struct.vnode = type <{ i32, i8, i8, i8, i8, i8*, %struct.vop_vector*, i8*, %struct.mount*, %struct.freelst, %union.sigval, %struct.freelst, i32, i8, i8, i8, i8, %21, %22, %struct.namecache*, i64, i64, i64, i32, i8, i8, i8, i8, %struct.lock, %struct.mtx, %struct.lock*, i32, i32, i64, i64, i32, i8, i8, i8, i8, %struct.freelst, %struct.bufobj, %struct.vpollinfo*, %struct.label*, %struct.lockf* }>
- %struct.vnodeop_desc = type <{ i8*, i32, i8, i8, i8, i8, i32 (%struct.vop_generic_args*)*, i32*, i32, i32, i32, i32 }>
- %struct.vop_access_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, %struct.ucred*, %struct.thread* }>
- %struct.vop_aclcheck_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, %struct.acl*, %struct.ucred*, %struct.thread* }>
- %struct.vop_advlock_args = type <{ %struct.vop_generic_args, %struct.vnode*, i8*, i32, i8, i8, i8, i8, %struct.flock*, i32, i8, i8, i8, i8 }>
- %struct.vop_advlockasync_args = type <{ %struct.vop_generic_args, %struct.vnode*, i8*, i32, i8, i8, i8, i8, %struct.flock*, i32, i8, i8, i8, i8, %struct.task*, i8** }>
- %struct.vop_bmap_args = type <{ %struct.vop_generic_args, %struct.vnode*, i64, %struct.bufobj**, i64*, i32*, i32* }>
- %struct.vop_cachedlookup_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vnode**, %struct.componentname* }>
- %struct.vop_create_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vnode**, %struct.componentname*, %struct.vattr* }>
- %struct.vop_deleteextattr_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, i8*, %struct.ucred*, %struct.thread* }>
- %struct.vop_fsync_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, %struct.thread* }>
- %struct.vop_generic_args = type <{ %struct.vnodeop_desc* }>
- %struct.vop_getattr_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vattr*, %struct.ucred* }>
- %struct.vop_getextattr_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, i8*, %struct.uio*, i64*, %struct.ucred*, %struct.thread* }>
- %struct.vop_getpages_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vm_page**, i32, i32, i64 }>
- %struct.vop_getwritemount_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.mount** }>
- %struct.vop_inactive_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.thread* }>
- %struct.vop_ioctl_args = type <{ %struct.vop_generic_args, %struct.vnode*, i64, i8*, i32, i8, i8, i8, i8, %struct.ucred*, %struct.thread* }>
- %struct.vop_islocked_args = type <{ %struct.vop_generic_args, %struct.vnode* }>
- %struct.vop_kqfilter_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.knote* }>
- %struct.vop_link_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vnode*, %struct.componentname* }>
- %struct.vop_listextattr_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, %struct.uio*, i64*, %struct.ucred*, %struct.thread* }>
- %struct.vop_lock1_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, i8*, i32, i8, i8, i8, i8 }>
- %struct.vop_open_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, %struct.ucred*, %struct.thread*, %struct.file* }>
- %struct.vop_openextattr_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.ucred*, %struct.thread* }>
- %struct.vop_pathconf_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, i64* }>
- %struct.vop_putpages_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vm_page**, i32, i32, i32*, i64 }>
- %struct.vop_read_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.uio*, i32, i8, i8, i8, i8, %struct.ucred* }>
- %struct.vop_readdir_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.uio*, %struct.ucred*, i32*, i32*, i64** }>
- %struct.vop_readlink_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.uio*, %struct.ucred* }>
- %struct.vop_reallocblks_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.cluster_save* }>
- %struct.vop_rename_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vnode*, %struct.componentname*, %struct.vnode*, %struct.vnode*, %struct.componentname* }>
- %struct.vop_revoke_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8 }>
- %struct.vop_setextattr_args = type <{ %struct.vop_generic_args, %struct.vnode*, i32, i8, i8, i8, i8, i8*, %struct.uio*, %struct.ucred*, %struct.thread* }>
- %struct.vop_setlabel_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.label*, %struct.ucred*, %struct.thread* }>
- %struct.vop_strategy_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.buf* }>
- %struct.vop_symlink_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vnode**, %struct.componentname*, %struct.vattr*, i8* }>
- %struct.vop_vector = type <{ %struct.vop_vector*, i32 (%struct.vop_generic_args*)*, i32 (%struct.vop_islocked_args*)*, i32 (%struct.vop_cachedlookup_args*)*, i32 (%struct.vop_cachedlookup_args*)*, i32 (%struct.vop_create_args*)*, i32 (%struct.vop_whiteout_args*)*, i32 (%struct.vop_create_args*)*, i32 (%struct.vop_open_args*)*, i32 (%struct.vop_access_args*)*, i32 (%struct.vop_access_args*)*, i32 (%struct.vop_access_args*)*, i32 (%struct.vop_getattr_args*)*, i32 (%struct.vop_getattr_args*)*, i32 (%struct.vop_islocked_args*)*, i32 (%struct.vop_read_args*)*, i32 (%struct.vop_read_args*)*, i32 (%struct.vop_ioctl_args*)*, i32 (%struct.vop_access_args*)*, i32 (%struct.vop_kqfilter_args*)*, i32 (%struct.vop_revoke_args*)*, i32 (%struct.vop_fsync_args*)*, i32 (%struct.vop_link_args*)*, i32 (%struct.vop_link_args*)*, i32 (%struct.vop_rename_args*)*, i32 (%struct.vop_create_args*)*, i32 (%struct.vop_link_args*)*, i32 (%struct.vop_symlink_args*)*, i32 (%struct.vop_readdir_args*)*, i32 (%struct.vop_readlink_args*)*, i32 (%struct.vop_inactive_args*)*, i32 (%struct.vop_inactive_args*)*, i32 (%struct.vop_lock1_args*)*, i32 (%struct.vop_revoke_args*)*, i32 (%struct.vop_bmap_args*)*, i32 (%struct.vop_strategy_args*)*, i32 (%struct.vop_getwritemount_args*)*, i32 (%struct.vop_islocked_args*)*, i32 (%struct.vop_pathconf_args*)*, i32 (%struct.vop_advlock_args*)*, i32 (%struct.vop_advlockasync_args*)*, i32 (%struct.vop_reallocblks_args*)*, i32 (%struct.vop_getpages_args*)*, i32 (%struct.vop_putpages_args*)*, i32 (%struct.vop_aclcheck_args*)*, i32 (%struct.vop_aclcheck_args*)*, i32 (%struct.vop_aclcheck_args*)*, i32 (%struct.vop_access_args*)*, i32 (%struct.vop_getextattr_args*)*, i32 (%struct.vop_listextattr_args*)*, i32 (%struct.vop_openextattr_args*)*, i32 (%struct.vop_deleteextattr_args*)*, i32 (%struct.vop_setextattr_args*)*, i32 (%struct.vop_setlabel_args*)*, i32 (%struct.vop_vptofh_args*)*, i32 (%struct.vop_vptocnp_args*)* }>
- %struct.vop_vptocnp_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.vnode**, %struct.ucred*, i8*, i32* }>
- %struct.vop_vptofh_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.fid* }>
- %struct.vop_whiteout_args = type <{ %struct.vop_generic_args, %struct.vnode*, %struct.componentname*, i32, i8, i8, i8, i8 }>
+ %struct.vnode = type <{ i32, i8, i8, i8, i8, ptr, ptr, ptr, ptr, %struct.freelst, %union.sigval, %struct.freelst, i32, i8, i8, i8, i8, %21, %22, ptr, i64, i64, i64, i32, i8, i8, i8, i8, %struct.lock, %struct.mtx, ptr, i32, i32, i64, i64, i32, i8, i8, i8, i8, %struct.freelst, %struct.bufobj, ptr, ptr, ptr }>
+ %struct.vnodeop_desc = type <{ ptr, i32, i8, i8, i8, i8, ptr, ptr, i32, i32, i32, i32 }>
+ %struct.vop_access_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr, ptr }>
+ %struct.vop_aclcheck_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr, ptr, ptr }>
+ %struct.vop_advlock_args = type <{ %struct.vop_generic_args, ptr, ptr, i32, i8, i8, i8, i8, ptr, i32, i8, i8, i8, i8 }>
+ %struct.vop_advlockasync_args = type <{ %struct.vop_generic_args, ptr, ptr, i32, i8, i8, i8, i8, ptr, i32, i8, i8, i8, i8, ptr, ptr }>
+ %struct.vop_bmap_args = type <{ %struct.vop_generic_args, ptr, i64, ptr, ptr, ptr, ptr }>
+ %struct.vop_cachedlookup_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr }>
+ %struct.vop_create_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr, ptr }>
+ %struct.vop_deleteextattr_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr, ptr, ptr }>
+ %struct.vop_fsync_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr }>
+ %struct.vop_generic_args = type <{ ptr }>
+ %struct.vop_getattr_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr }>
+ %struct.vop_getextattr_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr, ptr, ptr, ptr, ptr }>
+ %struct.vop_getpages_args = type <{ %struct.vop_generic_args, ptr, ptr, i32, i32, i64 }>
+ %struct.vop_getwritemount_args = type <{ %struct.vop_generic_args, ptr, ptr }>
+ %struct.vop_inactive_args = type <{ %struct.vop_generic_args, ptr, ptr }>
+ %struct.vop_ioctl_args = type <{ %struct.vop_generic_args, ptr, i64, ptr, i32, i8, i8, i8, i8, ptr, ptr }>
+ %struct.vop_islocked_args = type <{ %struct.vop_generic_args, ptr }>
+ %struct.vop_kqfilter_args = type <{ %struct.vop_generic_args, ptr, ptr }>
+ %struct.vop_link_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr }>
+ %struct.vop_listextattr_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr, ptr, ptr, ptr }>
+ %struct.vop_lock1_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr, i32, i8, i8, i8, i8 }>
+ %struct.vop_open_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr, ptr, ptr }>
+ %struct.vop_openextattr_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr }>
+ %struct.vop_pathconf_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr }>
+ %struct.vop_putpages_args = type <{ %struct.vop_generic_args, ptr, ptr, i32, i32, ptr, i64 }>
+ %struct.vop_read_args = type <{ %struct.vop_generic_args, ptr, ptr, i32, i8, i8, i8, i8, ptr }>
+ %struct.vop_readdir_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr, ptr, ptr, ptr }>
+ %struct.vop_readlink_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr }>
+ %struct.vop_reallocblks_args = type <{ %struct.vop_generic_args, ptr, ptr }>
+ %struct.vop_rename_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr, ptr, ptr, ptr }>
+ %struct.vop_revoke_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8 }>
+ %struct.vop_setextattr_args = type <{ %struct.vop_generic_args, ptr, i32, i8, i8, i8, i8, ptr, ptr, ptr, ptr }>
+ %struct.vop_setlabel_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr, ptr }>
+ %struct.vop_strategy_args = type <{ %struct.vop_generic_args, ptr, ptr }>
+ %struct.vop_symlink_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr, ptr, ptr }>
+ %struct.vop_vector = type <{ ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }>
+ %struct.vop_vptocnp_args = type <{ %struct.vop_generic_args, ptr, ptr, ptr, ptr, ptr }>
+ %struct.vop_vptofh_args = type <{ %struct.vop_generic_args, ptr, ptr }>
+ %struct.vop_whiteout_args = type <{ %struct.vop_generic_args, ptr, ptr, i32, i8, i8, i8, i8 }>
%struct.vpollinfo = type <{ %struct.mtx, %struct.selinfo, i16, i16, i8, i8, i8, i8 }>
%struct.witness = type opaque
- %struct.workhead = type <{ %struct.worklist* }>
+ %struct.workhead = type <{ ptr }>
%struct.worklist = type opaque
%union.anon = type <{ [16 x i8] }>
%union.pager_info = type <{ [4 x i8] }>
%union.sigval = type <{ [8 x i8] }>
-define i32 @vlrureclaim(%struct.mount* %mp) nounwind {
+define i32 @vlrureclaim(ptr %mp) nounwind {
entry:
br i1 undef, label %if.then11, label %do.end
diff --git a/llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll b/llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll
index a0d8f10eb2adf..0078c3c2b03d8 100644
--- a/llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll
+++ b/llvm/test/Analysis/ScalarEvolution/avoid-smax-0.ll
@@ -5,13 +5,13 @@
; We don't want to use a max in the trip count expression in
; this testcase.
-define void @foo(i32 %n, i32* %p, i32* %q) nounwind {
+define void @foo(i32 %n, ptr %p, ptr %q) nounwind {
entry:
icmp sgt i32 %n, 0
br i1 %0, label %bb, label %return
bb:
- load i32, i32* %q, align 4
+ load i32, ptr %q, align 4
icmp eq i32 %1, 0
br i1 %2, label %return, label %bb3.preheader
@@ -20,11 +20,11 @@ bb3.preheader:
bb3:
%i.0 = phi i32 [ %7, %bb3 ], [ 0, %bb3.preheader ]
- getelementptr i32, i32* %p, i32 %i.0
- load i32, i32* %3, align 4
+ getelementptr i32, ptr %p, i32 %i.0
+ load i32, ptr %3, align 4
add i32 %4, 1
- getelementptr i32, i32* %p, i32 %i.0
- store i32 %5, i32* %6, align 4
+ getelementptr i32, ptr %p, i32 %i.0
+ store i32 %5, ptr %6, align 4
add i32 %i.0, 1
icmp slt i32 %7, %n
br i1 %8, label %bb3, label %return.loopexit
diff --git a/llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll b/llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll
index a1c3a2912d673..c7896a3581290 100644
--- a/llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll
+++ b/llvm/test/Analysis/ScalarEvolution/avoid-smax-1.ll
@@ -11,7 +11,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9"
-define void @foo(i8* %r, i32 %s, i32 %w, i32 %x, i8* %j, i32 %d) nounwind {
+define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
entry:
%0 = mul i32 %x, %w ; <i32> [#uses=2]
%1 = mul i32 %x, %w ; <i32> [#uses=1]
@@ -35,10 +35,10 @@ bb6: ; preds = %bb7, %bb.nph7
%7 = add i32 %x.06, %4 ; <i32> [#uses=1]
%8 = shl i32 %x.06, 1 ; <i32> [#uses=1]
%9 = add i32 %6, %8 ; <i32> [#uses=1]
- %10 = getelementptr i8, i8* %r, i32 %9 ; <i8*> [#uses=1]
- %11 = load i8, i8* %10, align 1 ; <i8> [#uses=1]
- %12 = getelementptr i8, i8* %j, i32 %7 ; <i8*> [#uses=1]
- store i8 %11, i8* %12, align 1
+ %10 = getelementptr i8, ptr %r, i32 %9 ; <ptr> [#uses=1]
+ %11 = load i8, ptr %10, align 1 ; <i8> [#uses=1]
+ %12 = getelementptr i8, ptr %j, i32 %7 ; <ptr> [#uses=1]
+ store i8 %11, ptr %12, align 1
%13 = add i32 %x.06, 1 ; <i32> [#uses=2]
br label %bb7
@@ -102,19 +102,19 @@ bb14: ; preds = %bb15, %bb.nph3
%x.12 = phi i32 [ %40, %bb15 ], [ 0, %bb.nph3 ] ; <i32> [#uses=5]
%29 = shl i32 %x.12, 2 ; <i32> [#uses=1]
%30 = add i32 %29, %25 ; <i32> [#uses=1]
- %31 = getelementptr i8, i8* %r, i32 %30 ; <i8*> [#uses=1]
- %32 = load i8, i8* %31, align 1 ; <i8> [#uses=1]
+ %31 = getelementptr i8, ptr %r, i32 %30 ; <ptr> [#uses=1]
+ %32 = load i8, ptr %31, align 1 ; <i8> [#uses=1]
%.sum = add i32 %26, %x.12 ; <i32> [#uses=1]
- %33 = getelementptr i8, i8* %j, i32 %.sum ; <i8*> [#uses=1]
- store i8 %32, i8* %33, align 1
+ %33 = getelementptr i8, ptr %j, i32 %.sum ; <ptr> [#uses=1]
+ store i8 %32, ptr %33, align 1
%34 = shl i32 %x.12, 2 ; <i32> [#uses=1]
%35 = or i32 %34, 2 ; <i32> [#uses=1]
%36 = add i32 %35, %25 ; <i32> [#uses=1]
- %37 = getelementptr i8, i8* %r, i32 %36 ; <i8*> [#uses=1]
- %38 = load i8, i8* %37, align 1 ; <i8> [#uses=1]
+ %37 = getelementptr i8, ptr %r, i32 %36 ; <ptr> [#uses=1]
+ %38 = load i8, ptr %37, align 1 ; <i8> [#uses=1]
%.sum6 = add i32 %27, %x.12 ; <i32> [#uses=1]
- %39 = getelementptr i8, i8* %j, i32 %.sum6 ; <i8*> [#uses=1]
- store i8 %38, i8* %39, align 1
+ %39 = getelementptr i8, ptr %j, i32 %.sum6 ; <ptr> [#uses=1]
+ store i8 %38, ptr %39, align 1
%40 = add i32 %x.12, 1 ; <i32> [#uses=2]
br label %bb15
@@ -168,11 +168,11 @@ bb23: ; preds = %bb24, %bb.nph
%y.21 = phi i32 [ %57, %bb24 ], [ 0, %bb.nph ] ; <i32> [#uses=3]
%53 = mul i32 %y.21, %50 ; <i32> [#uses=1]
%.sum1 = add i32 %53, %51 ; <i32> [#uses=1]
- %54 = getelementptr i8, i8* %r, i32 %.sum1 ; <i8*> [#uses=1]
+ %54 = getelementptr i8, ptr %r, i32 %.sum1 ; <ptr> [#uses=1]
%55 = mul i32 %y.21, %w ; <i32> [#uses=1]
%.sum5 = add i32 %55, %.sum3 ; <i32> [#uses=1]
- %56 = getelementptr i8, i8* %j, i32 %.sum5 ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %56, i8* %54, i32 %w, i1 false)
+ %56 = getelementptr i8, ptr %j, i32 %.sum5 ; <ptr> [#uses=1]
+ tail call void @llvm.memcpy.p0.p0.i32(ptr %56, ptr %54, i32 %w, i1 false)
%57 = add i32 %y.21, 1 ; <i32> [#uses=2]
br label %bb24
@@ -186,10 +186,10 @@ bb24.bb26_crit_edge: ; preds = %bb24
bb26: ; preds = %bb24.bb26_crit_edge, %bb22
%59 = mul i32 %x, %w ; <i32> [#uses=1]
%.sum4 = add i32 %.sum3, %59 ; <i32> [#uses=1]
- %60 = getelementptr i8, i8* %j, i32 %.sum4 ; <i8*> [#uses=1]
+ %60 = getelementptr i8, ptr %j, i32 %.sum4 ; <ptr> [#uses=1]
%61 = mul i32 %x, %w ; <i32> [#uses=1]
%62 = sdiv i32 %61, 2 ; <i32> [#uses=1]
- tail call void @llvm.memset.p0i8.i32(i8* %60, i8 -128, i32 %62, i1 false)
+ tail call void @llvm.memset.p0.i32(ptr %60, i8 -128, i32 %62, i1 false)
ret void
bb29: ; preds = %bb20, %entry
@@ -204,10 +204,10 @@ bb.nph11: ; preds = %bb29
bb30: ; preds = %bb31, %bb.nph11
%y.310 = phi i32 [ %70, %bb31 ], [ 0, %bb.nph11 ] ; <i32> [#uses=3]
%66 = mul i32 %y.310, %64 ; <i32> [#uses=1]
- %67 = getelementptr i8, i8* %r, i32 %66 ; <i8*> [#uses=1]
+ %67 = getelementptr i8, ptr %r, i32 %66 ; <ptr> [#uses=1]
%68 = mul i32 %y.310, %w ; <i32> [#uses=1]
- %69 = getelementptr i8, i8* %j, i32 %68 ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %69, i8* %67, i32 %w, i1 false)
+ %69 = getelementptr i8, ptr %j, i32 %68 ; <ptr> [#uses=1]
+ tail call void @llvm.memcpy.p0.p0.i32(ptr %69, ptr %67, i32 %w, i1 false)
%70 = add i32 %y.310, 1 ; <i32> [#uses=2]
br label %bb31
@@ -220,15 +220,15 @@ bb31.bb33_crit_edge: ; preds = %bb31
bb33: ; preds = %bb31.bb33_crit_edge, %bb29
%72 = mul i32 %x, %w ; <i32> [#uses=1]
- %73 = getelementptr i8, i8* %j, i32 %72 ; <i8*> [#uses=1]
+ %73 = getelementptr i8, ptr %j, i32 %72 ; <ptr> [#uses=1]
%74 = mul i32 %x, %w ; <i32> [#uses=1]
%75 = sdiv i32 %74, 2 ; <i32> [#uses=1]
- tail call void @llvm.memset.p0i8.i32(i8* %73, i8 -128, i32 %75, i1 false)
+ tail call void @llvm.memset.p0.i32(ptr %73, i8 -128, i32 %75, i1 false)
ret void
return: ; preds = %bb20
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) nounwind
diff --git a/llvm/test/Analysis/ScalarEvolution/cache_loop_exit_limit.ll b/llvm/test/Analysis/ScalarEvolution/cache_loop_exit_limit.ll
index 38ca77411d29f..9dfa94c5021cc 100644
--- a/llvm/test/Analysis/ScalarEvolution/cache_loop_exit_limit.ll
+++ b/llvm/test/Analysis/ScalarEvolution/cache_loop_exit_limit.ll
@@ -1,14 +1,14 @@
; RUN: opt -scalar-evolution-max-arith-depth=4 -scalar-evolution-max-add-rec-size=4 -loop-reduce -S < %s | FileCheck %s
; Check that the test does not hang.
-define void @test_01(i32* nocapture %a) local_unnamed_addr {
+define void @test_01(ptr nocapture %a) local_unnamed_addr {
; CHECK-LABEL: @test_01(
while.body.outer:
- %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 96
- %arrayidx2.promoted50 = load i32, i32* %arrayidx2, align 1
- %a.promoted = load i32, i32* %a, align 1
+ %arrayidx2 = getelementptr inbounds i32, ptr %a, i64 96
+ %arrayidx2.promoted50 = load i32, ptr %arrayidx2, align 1
+ %a.promoted = load i32, ptr %a, align 1
%add347.peel = xor i32 %arrayidx2.promoted50, -1
%tobool48.peel = icmp eq i32 %a.promoted, %add347.peel
br i1 %tobool48.peel, label %while.body.preheader, label %while.body4.preheader
diff --git a/llvm/test/Analysis/ScalarEvolution/cycled_phis.ll b/llvm/test/Analysis/ScalarEvolution/cycled_phis.ll
index 324268815c44f..a04a93bd005c7 100644
--- a/llvm/test/Analysis/ScalarEvolution/cycled_phis.ll
+++ b/llvm/test/Analysis/ScalarEvolution/cycled_phis.ll
@@ -33,16 +33,16 @@ exit:
}
; FIXME: Both inner and outer loop Phis should have the same range [0, 3000).
-define void @test_02(i32* %p, i32* %q) {
+define void @test_02(ptr %p, ptr %q) {
; CHECK-LABEL: 'test_02'
; CHECK-NEXT: Classifying expressions for: @test_02
-; CHECK-NEXT: %start = load i32, i32* %p, align 4, !range !0
+; CHECK-NEXT: %start = load i32, ptr %p, align 4, !range !0
; CHECK-NEXT: --> %start U: [0,1000) S: [0,1000)
; CHECK-NEXT: %outer_phi = phi i32 [ %start, %entry ], [ %inner_lcssa, %outer_backedge ]
; CHECK-NEXT: --> %outer_phi U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %outer_loop: Variant, %inner_loop: Invariant }
; CHECK-NEXT: %inner_phi = phi i32 [ %outer_phi, %outer_loop ], [ %inner_load, %inner_loop ]
; CHECK-NEXT: --> %inner_phi U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %inner_loop: Variant, %outer_loop: Variant }
-; CHECK-NEXT: %inner_load = load i32, i32* %q, align 4, !range !1
+; CHECK-NEXT: %inner_load = load i32, ptr %q, align 4, !range !1
; CHECK-NEXT: --> %inner_load U: [2000,3000) S: [2000,3000) Exits: <<Unknown>> LoopDispositions: { %inner_loop: Variant, %outer_loop: Variant }
; CHECK-NEXT: %inner_cond = call i1 @cond()
; CHECK-NEXT: --> %inner_cond U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %inner_loop: Variant, %outer_loop: Variant }
@@ -61,7 +61,7 @@ define void @test_02(i32* %p, i32* %q) {
; CHECK-NEXT: Loop %outer_loop: Unpredictable predicated backedge-taken count.
;
entry:
- %start = load i32, i32* %p, !range !0
+ %start = load i32, ptr %p, !range !0
br label %outer_loop
outer_loop:
@@ -70,7 +70,7 @@ outer_loop:
inner_loop:
%inner_phi = phi i32 [%outer_phi, %outer_loop], [%inner_load, %inner_loop]
- %inner_load = load i32, i32* %q, !range !1
+ %inner_load = load i32, ptr %q, !range !1
%inner_cond = call i1 @cond()
br i1 %inner_cond, label %inner_loop, label %outer_backedge
@@ -84,12 +84,12 @@ exit:
}
; FIXME: All phis should have range [0, 3000)
-define void @test_03(i32* %p, i32* %q) {
+define void @test_03(ptr %p, ptr %q) {
; CHECK-LABEL: 'test_03'
; CHECK-NEXT: Classifying expressions for: @test_03
-; CHECK-NEXT: %start_1 = load i32, i32* %p, align 4, !range !0
+; CHECK-NEXT: %start_1 = load i32, ptr %p, align 4, !range !0
; CHECK-NEXT: --> %start_1 U: [0,1000) S: [0,1000)
-; CHECK-NEXT: %start_2 = load i32, i32* %q, align 4, !range !1
+; CHECK-NEXT: %start_2 = load i32, ptr %q, align 4, !range !1
; CHECK-NEXT: --> %start_2 U: [2000,3000) S: [2000,3000)
; CHECK-NEXT: %outer_phi = phi i32 [ %start_1, %entry ], [ %inner_lcssa, %outer_backedge ]
; CHECK-NEXT: --> %outer_phi U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %outer_loop: Variant, %inner_loop: Invariant }
@@ -114,8 +114,8 @@ define void @test_03(i32* %p, i32* %q) {
; CHECK-NEXT: Loop %outer_loop: Unpredictable predicated backedge-taken count.
;
entry:
- %start_1 = load i32, i32* %p, !range !0
- %start_2 = load i32, i32* %q, !range !1
+ %start_1 = load i32, ptr %p, !range !0
+ %start_2 = load i32, ptr %q, !range !1
br label %outer_loop
outer_loop:
diff --git a/llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll b/llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll
index d42474184a0ac..d95f26f3ef42f 100644
--- a/llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll
+++ b/llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll
@@ -528,7 +528,7 @@ for.body5: ; preds = %for.cond4
br label %for.cond4
}
-define i64 @uminseq_vs_ptrtoint_complexity(i64 %n, i64 %m, i64* %ptr) {
+define i64 @uminseq_vs_ptrtoint_complexity(i64 %n, i64 %m, ptr %ptr) {
; CHECK-LABEL: 'uminseq_vs_ptrtoint_complexity'
; CHECK-NEXT: Classifying expressions for: @uminseq_vs_ptrtoint_complexity
; CHECK-NEXT: %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
@@ -537,10 +537,10 @@ define i64 @uminseq_vs_ptrtoint_complexity(i64 %n, i64 %m, i64* %ptr) {
; CHECK-NEXT: --> {1,+,1}<%loop> U: full-set S: full-set Exits: (1 + (%n umin_seq %m)) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %cond = select i1 %cond_p0, i1 %cond_p1, i1 false
; CHECK-NEXT: --> (%cond_p0 umin_seq %cond_p1) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
-; CHECK-NEXT: %ptr.int = ptrtoint i64* %ptr to i64
-; CHECK-NEXT: --> (ptrtoint i64* %ptr to i64) U: full-set S: full-set
+; CHECK-NEXT: %ptr.int = ptrtoint ptr %ptr to i64
+; CHECK-NEXT: --> (ptrtoint ptr %ptr to i64) U: full-set S: full-set
; CHECK-NEXT: %r = add i64 %i, %ptr.int
-; CHECK-NEXT: --> {(ptrtoint i64* %ptr to i64),+,1}<%loop> U: full-set S: full-set --> ((%n umin_seq %m) + (ptrtoint i64* %ptr to i64)) U: full-set S: full-set
+; CHECK-NEXT: --> {(ptrtoint ptr %ptr to i64),+,1}<%loop> U: full-set S: full-set --> ((%n umin_seq %m) + (ptrtoint ptr %ptr to i64)) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @uminseq_vs_ptrtoint_complexity
; CHECK-NEXT: Loop %loop: backedge-taken count is (%n umin_seq %m)
; CHECK-NEXT: Loop %loop: constant max backedge-taken count is -1
@@ -559,7 +559,7 @@ loop:
%cond = select i1 %cond_p0, i1 %cond_p1, i1 false
br i1 %cond, label %loop, label %exit
exit:
- %ptr.int = ptrtoint i64* %ptr to i64
+ %ptr.int = ptrtoint ptr %ptr to i64
%r = add i64 %i, %ptr.int
ret i64 %r
}
diff --git a/llvm/test/Analysis/ScalarEvolution/exponential-behavior.ll b/llvm/test/Analysis/ScalarEvolution/exponential-behavior.ll
index e27b0b28c520d..6632e64974fde 100644
--- a/llvm/test/Analysis/ScalarEvolution/exponential-behavior.ll
+++ b/llvm/test/Analysis/ScalarEvolution/exponential-behavior.ll
@@ -7,7 +7,7 @@
; CHECK: Loop %loop: Unpredictable predicated backedge-taken count.
-define void @f(i32 %n, i32* %ptr) {
+define void @f(i32 %n, ptr %ptr) {
entry:
br label %loop
@@ -48,7 +48,7 @@ loop:
br i1 %us.29, label %leave, label %be
be:
- store volatile i32 0, i32* %ptr
+ store volatile i32 0, ptr %ptr
%becond = icmp ult i32 %iv.inc, %n
br i1 %becond, label %leave, label %loop
diff --git a/llvm/test/Analysis/ScalarEvolution/flags-from-poison-dbg.ll b/llvm/test/Analysis/ScalarEvolution/flags-from-poison-dbg.ll
index 9caac5eddeb27..2370fe1468b4e 100644
--- a/llvm/test/Analysis/ScalarEvolution/flags-from-poison-dbg.ll
+++ b/llvm/test/Analysis/ScalarEvolution/flags-from-poison-dbg.ll
@@ -4,7 +4,7 @@
; intrinsics. Unfortunately, I was not able to reduce this file
; further while still keeping the debug info well formed.
-define void @foo(i32 %n, i32* %arr) !dbg !7 {
+define void @foo(i32 %n, ptr %arr) !dbg !7 {
; CHECK-LABEL: Classifying expressions for: @foo
entry:
%cmp1 = icmp slt i32 0, %n, !dbg !12
@@ -22,8 +22,8 @@ for.body: ; preds = %for.inc, %for.body.
; CHECK: %idxprom = sext i32 %add to i64
; CHECK-NEXT: --> {50,+,1}<nuw><nsw><%for.body>
- %arrayidx = getelementptr inbounds i32, i32* %arr, i64 %idxprom, !dbg !21
- store i32 100, i32* %arrayidx, align 4, !dbg !22
+ %arrayidx = getelementptr inbounds i32, ptr %arr, i64 %idxprom, !dbg !21
+ store i32 100, ptr %arrayidx, align 4, !dbg !22
br label %for.inc, !dbg !23
for.inc: ; preds = %for.body
diff --git a/llvm/test/Analysis/ScalarEvolution/flattened-0.ll b/llvm/test/Analysis/ScalarEvolution/flattened-0.ll
index 31e96f84f2818..9a0c7c4f84d25 100644
--- a/llvm/test/Analysis/ScalarEvolution/flattened-0.ll
+++ b/llvm/test/Analysis/ScalarEvolution/flattened-0.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s "-passes=print<scalar-evolution>" -disable-output 2>&1 | FileCheck %s
-define void @foo([7 x i8]* %a) {
+define void @foo(ptr %a) {
; CHECK-LABEL: @foo
entry:
br label %bb
@@ -9,10 +9,10 @@ bb:
%idx = phi i64 [ 0, %entry ], [ %idx.incr, %bb ]
%i = udiv i64 %idx, 7
%j = urem i64 %idx, 7
- %a.ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 %i, i64 %j
-; CHECK: %a.ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 %i, i64 %j
+ %a.ptr = getelementptr [7 x i8], ptr %a, i64 %i, i64 %j
+; CHECK: %a.ptr = getelementptr [7 x i8], ptr %a, i64 %i, i64 %j
; CHECK-NEXT: --> {%a,+,1}<nw><%bb>
- %val = load i8, i8* %a.ptr
+ %val = load i8, ptr %a.ptr
%idx.incr = add i64 %idx, 1
%test = icmp ne i64 %idx.incr, 35
br i1 %test, label %bb, label %exit
diff --git a/llvm/test/Analysis/ScalarEvolution/guards.ll b/llvm/test/Analysis/ScalarEvolution/guards.ll
index da0f3d34370f9..3922775f3c830 100644
--- a/llvm/test/Analysis/ScalarEvolution/guards.ll
+++ b/llvm/test/Analysis/ScalarEvolution/guards.ll
@@ -12,10 +12,10 @@ declare void @llvm.experimental.guard(i1, ...)
declare void @use(i64 %x)
-define void @test_1(i1* %cond_buf, i32* %len_buf) {
+define void @test_1(ptr %cond_buf, ptr %len_buf) {
; CHECK-LABEL: @test_1(
entry:
- %len = load i32, i32* %len_buf, !range !{i32 1, i32 2147483648}
+ %len = load i32, ptr %len_buf, !range !{i32 1, i32 2147483648}
br label %loop
loop:
@@ -34,20 +34,20 @@ loop:
%iv.inc.cmp = icmp slt i32 %iv.inc, %len
call void(i1, ...) @llvm.experimental.guard(i1 %iv.inc.cmp) [ "deopt"() ]
- %becond = load volatile i1, i1* %cond_buf
+ %becond = load volatile i1, ptr %cond_buf
br i1 %becond, label %loop, label %leave
leave:
ret void
}
-define void @test_2(i32 %n, i32* %len_buf) {
+define void @test_2(i32 %n, ptr %len_buf) {
; CHECK-LABEL: @test_2(
; CHECK: [[LEN_ZEXT:%[^ ]+]] = zext i32 %len to i64
; CHECK: br label %loop
entry:
- %len = load i32, i32* %len_buf, !range !{i32 0, i32 2147483648}
+ %len = load i32, ptr %len_buf, !range !{i32 0, i32 2147483648}
br label %loop
loop:
@@ -74,11 +74,11 @@ leave:
ret void
}
-define void @test_3(i1* %cond_buf, i32* %len_buf) {
+define void @test_3(ptr %cond_buf, ptr %len_buf) {
; CHECK-LABEL: @test_3(
entry:
- %len = load i32, i32* %len_buf
+ %len = load i32, ptr %len_buf
%entry.cond = icmp sgt i32 %len, 0
call void(i1, ...) @llvm.experimental.guard(i1 %entry.cond) [ "deopt"() ]
br label %loop
@@ -98,18 +98,18 @@ loop:
%iv.inc.cmp = icmp slt i32 %iv.inc, %len
call void(i1, ...) @llvm.experimental.guard(i1 %iv.inc.cmp) [ "deopt"() ]
- %becond = load volatile i1, i1* %cond_buf
+ %becond = load volatile i1, ptr %cond_buf
br i1 %becond, label %loop, label %leave
leave:
ret void
}
-define void @test_4(i1* %cond_buf, i32* %len_buf) {
+define void @test_4(ptr %cond_buf, ptr %len_buf) {
; CHECK-LABEL: @test_4(
entry:
- %len = load i32, i32* %len_buf
+ %len = load i32, ptr %len_buf
%entry.cond = icmp sgt i32 %len, 0
call void(i1, ...) @llvm.experimental.guard(i1 %entry.cond) [ "deopt"() ]
br label %loop
@@ -118,7 +118,7 @@ loop:
%iv = phi i32 [ 0, %entry ], [ %iv.inc, %be ]
%iv.inc = add i32 %iv, 1
- %cond = load volatile i1, i1* %cond_buf
+ %cond = load volatile i1, ptr %cond_buf
br i1 %cond, label %left, label %be
left:
@@ -136,7 +136,7 @@ be:
%iv.cmp = icmp slt i32 %iv, %len
call void(i1, ...) @llvm.experimental.guard(i1 %iv.cmp) [ "deopt"() ]
- %becond = load volatile i1, i1* %cond_buf
+ %becond = load volatile i1, ptr %cond_buf
br i1 %becond, label %loop, label %leave
leave:
diff --git a/llvm/test/Analysis/ScalarEvolution/implied-via-addition.ll b/llvm/test/Analysis/ScalarEvolution/implied-via-addition.ll
index d082a6bb7ccef..4a0ebf810568e 100644
--- a/llvm/test/Analysis/ScalarEvolution/implied-via-addition.ll
+++ b/llvm/test/Analysis/ScalarEvolution/implied-via-addition.ll
@@ -29,20 +29,20 @@ define void @test_01(i8 %t) {
define void @test_02(i8 %t) {
; CHECK-LABEL: test_02
entry:
- %t.ptr = inttoptr i8 %t to i8*
- %p.42 = inttoptr i8 42 to i8*
- %cmp1 = icmp slt i8* %t.ptr, %p.42
+ %t.ptr = inttoptr i8 %t to ptr
+ %p.42 = inttoptr i8 42 to ptr
+ %cmp1 = icmp slt ptr %t.ptr, %p.42
call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ]
br label %loop
loop:
; CHECK-LABEL: loop
- %idx = phi i8* [ %t.ptr, %entry ], [ %snext, %loop ]
- %snext = getelementptr inbounds i8, i8* %idx, i64 1
- %c = icmp slt i8* %idx, %p.42
+ %idx = phi ptr [ %t.ptr, %entry ], [ %snext, %loop ]
+ %snext = getelementptr inbounds i8, ptr %idx, i64 1
+ %c = icmp slt ptr %idx, %p.42
; CHECK: call void @use(i1 true)
call void @use(i1 %c)
- %be = icmp slt i8* %snext, %p.42
+ %be = icmp slt ptr %snext, %p.42
br i1 %be, label %loop, label %exit
exit:
diff --git a/llvm/test/Analysis/ScalarEvolution/incorrect-exit-count.ll b/llvm/test/Analysis/ScalarEvolution/incorrect-exit-count.ll
index 999f9eea4bb75..765bfb2b5e546 100644
--- a/llvm/test/Analysis/ScalarEvolution/incorrect-exit-count.ll
+++ b/llvm/test/Analysis/ScalarEvolution/incorrect-exit-count.ll
@@ -20,9 +20,9 @@ define dso_local i32 @f() {
; CHECK-NEXT: --> {3,+,-1}<nuw><nsw><%for.cond6> U: [3,4) S: [3,4) Exits: <<Unknown>> LoopDispositions: { %for.cond6: Computable, %outer.loop: Variant }
; CHECK-NEXT: %idxprom20 = zext i32 %storemerge1921 to i64
; CHECK-NEXT: --> {3,+,4294967295}<nuw><nsw><%for.cond6> U: [3,4) S: [3,4) Exits: <<Unknown>> LoopDispositions: { %for.cond6: Computable, %outer.loop: Variant }
-; CHECK-NEXT: %arrayidx7 = getelementptr inbounds [1 x [4 x i16]], [1 x [4 x i16]]* @__const.f.g, i64 0, i64 0, i64 %idxprom20
+; CHECK-NEXT: %arrayidx7 = getelementptr inbounds [1 x [4 x i16]], ptr @__const.f.g, i64 0, i64 0, i64 %idxprom20
; CHECK-NEXT: --> {(6 + @__const.f.g),+,8589934590}<nuw><%for.cond6> U: [0,-1) S: [-9223372036854775808,9223372036854775807) Exits: <<Unknown>> LoopDispositions: { %for.cond6: Computable, %outer.loop: Variant }
-; CHECK-NEXT: %i = load i16, i16* %arrayidx7, align 2
+; CHECK-NEXT: %i = load i16, ptr %arrayidx7, align 2
; CHECK-NEXT: --> %i U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %for.cond6: Variant, %outer.loop: Variant }
; CHECK-NEXT: %storemerge1822.lcssa.ph = phi i32 [ 0, %for.cond6 ]
; CHECK-NEXT: --> 0 U: [0,1) S: [0,1)
@@ -30,9 +30,9 @@ define dso_local i32 @f() {
; CHECK-NEXT: --> 3 U: [3,4) S: [3,4)
; CHECK-NEXT: %storemerge1822.lcssa = phi i32 [ %storemerge1822.lcssa.ph, %if.end.loopexit ], [ %storemerge1822.lcssa.ph32, %if.end.loopexit31 ]
; CHECK-NEXT: --> %storemerge1822.lcssa U: [0,4) S: [0,4)
-; CHECK-NEXT: %i1 = load i32, i32* @e, align 4
+; CHECK-NEXT: %i1 = load i32, ptr @e, align 4
; CHECK-NEXT: --> %i1 U: full-set S: full-set
-; CHECK-NEXT: %i2 = load volatile i32, i32* @b, align 4
+; CHECK-NEXT: %i2 = load volatile i32, ptr @b, align 4
; CHECK-NEXT: --> %i2 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %for.cond6: Variant, %outer.loop: Variant }
; CHECK-NEXT: %dec = add nsw i32 %storemerge1921, -1
; CHECK-NEXT: --> {2,+,-1}<nsw><%for.cond6> U: [2,3) S: [2,3) Exits: <<Unknown>> LoopDispositions: { %for.cond6: Computable, %outer.loop: Variant }
@@ -44,11 +44,11 @@ define dso_local i32 @f() {
; CHECK-NEXT: --> {3,+,-1}<nuw><nsw><%inner.loop> U: [3,4) S: [3,4) Exits: <<Unknown>> LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
; CHECK-NEXT: %idxprom20.3 = zext i32 %storemerge1921.3 to i64
; CHECK-NEXT: --> {3,+,4294967295}<nuw><nsw><%inner.loop> U: [3,4) S: [3,4) Exits: <<Unknown>> LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
-; CHECK-NEXT: %arrayidx7.3 = getelementptr inbounds [1 x [4 x i16]], [1 x [4 x i16]]* @__const.f.g, i64 0, i64 0, i64 %idxprom20.3
+; CHECK-NEXT: %arrayidx7.3 = getelementptr inbounds [1 x [4 x i16]], ptr @__const.f.g, i64 0, i64 0, i64 %idxprom20.3
; CHECK-NEXT: --> {(6 + @__const.f.g),+,8589934590}<nuw><%inner.loop> U: [0,-1) S: [-9223372036854775808,9223372036854775807) Exits: <<Unknown>> LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
-; CHECK-NEXT: %i7 = load i16, i16* %arrayidx7.3, align 2
+; CHECK-NEXT: %i7 = load i16, ptr %arrayidx7.3, align 2
; CHECK-NEXT: --> %i7 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %inner.loop: Variant, %outer.loop: Variant }
-; CHECK-NEXT: %i8 = load volatile i32, i32* @b, align 4
+; CHECK-NEXT: %i8 = load volatile i32, ptr @b, align 4
; CHECK-NEXT: --> %i8 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %inner.loop: Variant, %outer.loop: Variant }
; CHECK-NEXT: %dec.3 = add nsw i32 %storemerge1921.3, -1
; CHECK-NEXT: --> {2,+,-1}<nsw><%inner.loop> U: [2,3) S: [2,3) Exits: <<Unknown>> LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
@@ -85,7 +85,7 @@ define dso_local i32 @f() {
; CHECK-NEXT: Loop %outer.loop: Unpredictable predicated backedge-taken count.
;
entry:
- store i32 3, i32* @a, align 4
+ store i32 3, ptr @a, align 4
br label %outer.loop
outer.loop: ; preds = %for.inc13.3, %entry
@@ -95,8 +95,8 @@ outer.loop: ; preds = %for.inc13.3, %entry
for.cond6: ; preds = %for.end, %outer.loop
%storemerge1921 = phi i32 [ 3, %outer.loop ], [ %dec, %for.end ]
%idxprom20 = zext i32 %storemerge1921 to i64
- %arrayidx7 = getelementptr inbounds [1 x [4 x i16]], [1 x [4 x i16]]* @__const.f.g, i64 0, i64 0, i64 %idxprom20
- %i = load i16, i16* %arrayidx7, align 2
+ %arrayidx7 = getelementptr inbounds [1 x [4 x i16]], ptr @__const.f.g, i64 0, i64 0, i64 %idxprom20
+ %i = load i16, ptr %arrayidx7, align 2
%tobool8 = icmp eq i16 %i, 0
br i1 %tobool8, label %if.end.loopexit, label %for.end
@@ -110,20 +110,20 @@ if.end.loopexit31: ; preds = %inner.loop
if.end: ; preds = %if.end.loopexit31, %if.end.loopexit
%storemerge1822.lcssa = phi i32 [ %storemerge1822.lcssa.ph, %if.end.loopexit ], [ %storemerge1822.lcssa.ph32, %if.end.loopexit31 ]
- store i32 %storemerge1822.lcssa, i32* @c, align 4
- store i32 2, i32* @d, align 4
- %i1 = load i32, i32* @e, align 4
+ store i32 %storemerge1822.lcssa, ptr @c, align 4
+ store i32 2, ptr @d, align 4
+ %i1 = load i32, ptr @e, align 4
br label %cleanup
for.end: ; preds = %for.cond6
- %i2 = load volatile i32, i32* @b, align 4
+ %i2 = load volatile i32, ptr @b, align 4
%tobool9 = icmp eq i32 %i2, 0
%dec = add nsw i32 %storemerge1921, -1
br i1 %tobool9, label %for.cond6, label %inner.loop
cleanup.loopexit: ; preds = %for.inc13.3
%inc.lcssa.lcssa = phi i32 [ 4, %for.inc13.3 ]
- store i32 %inc.lcssa.lcssa, i32* @c, align 4
+ store i32 %inc.lcssa.lcssa, ptr @c, align 4
br label %cleanup
cleanup: ; preds = %cleanup.loopexit, %if.end
@@ -133,22 +133,22 @@ cleanup: ; preds = %cleanup.loopexit, %
inner.loop: ; preds = %for.end.3, %for.end
%storemerge1921.3 = phi i32 [ 3, %for.end ], [ %dec.3, %for.end.3 ]
%idxprom20.3 = zext i32 %storemerge1921.3 to i64
- %arrayidx7.3 = getelementptr inbounds [1 x [4 x i16]], [1 x [4 x i16]]* @__const.f.g, i64 0, i64 0, i64 %idxprom20.3
- %i7 = load i16, i16* %arrayidx7.3, align 2
+ %arrayidx7.3 = getelementptr inbounds [1 x [4 x i16]], ptr @__const.f.g, i64 0, i64 0, i64 %idxprom20.3
+ %i7 = load i16, ptr %arrayidx7.3, align 2
%tobool8.3 = icmp eq i16 %i7, 0
br i1 %tobool8.3, label %if.end.loopexit31, label %for.end.3
for.end.3: ; preds = %inner.loop
- %i8 = load volatile i32, i32* @b, align 4
+ %i8 = load volatile i32, ptr @b, align 4
%tobool9.3 = icmp eq i32 %i8, 0
%dec.3 = add nsw i32 %storemerge1921.3, -1
br i1 %tobool9.3, label %inner.loop, label %for.inc13.3
for.inc13.3: ; preds = %for.end.3
%storemerge1921.lcssa25.3 = phi i32 [ %storemerge1921.3, %for.end.3 ]
- store i32 %storemerge1921.lcssa25.3, i32* @d, align 4
+ store i32 %storemerge1921.lcssa25.3, ptr @d, align 4
%dec16 = add nsw i32 %storemerge23, -1
- store i32 %dec16, i32* @a, align 4
+ store i32 %dec16, ptr @a, align 4
%tobool = icmp eq i32 %dec16, 0
br i1 %tobool, label %cleanup.loopexit, label %outer.loop
}
diff --git a/llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll b/llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
index 7a7024f293fad..a8b891b5afb23 100644
--- a/llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
+++ b/llvm/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
@@ -1,6 +1,6 @@
; ; RUN: opt -disable-output "-passes=print<scalar-evolution>" < %s 2>&1 | FileCheck %s
-define void @infer.sext.0(i1* %c, i32 %start, i32* %buf) {
+define void @infer.sext.0(ptr %c, i32 %start, ptr %buf) {
; CHECK-LABEL: Classifying expressions for: @infer.sext.0
entry:
br label %loop
@@ -13,8 +13,8 @@ define void @infer.sext.0(i1* %c, i32 %start, i32* %buf) {
; CHECK: %idx.inc.sext = sext i32 %idx.inc to i64
; CHECK-NEXT: --> {(1 + (sext i32 %start to i64))<nsw>,+,1}<nsw><%loop>
- %buf.gep = getelementptr inbounds i32, i32* %buf, i32 %idx.inc
- %val = load i32, i32* %buf.gep
+ %buf.gep = getelementptr inbounds i32, ptr %buf, i32 %idx.inc
+ %val = load i32, ptr %buf.gep
%condition = icmp eq i32 %counter, 1
%counter.inc = add i32 %counter, 1
@@ -24,7 +24,7 @@ define void @infer.sext.0(i1* %c, i32 %start, i32* %buf) {
ret void
}
-define void @infer.zext.0(i1* %c, i32 %start, i32* %buf) {
+define void @infer.zext.0(ptr %c, i32 %start, ptr %buf) {
; CHECK-LABEL: Classifying expressions for: @infer.zext.0
entry:
br label %loop
@@ -37,8 +37,8 @@ define void @infer.zext.0(i1* %c, i32 %start, i32* %buf) {
; CHECK: %idx.inc.sext = zext i32 %idx.inc to i64
; CHECK-NEXT: --> {(1 + (zext i32 %start to i64))<nuw><nsw>,+,1}<nuw><%loop>
- %buf.gep = getelementptr inbounds i32, i32* %buf, i32 %idx.inc
- %val = load i32, i32* %buf.gep
+ %buf.gep = getelementptr inbounds i32, ptr %buf, i32 %idx.inc
+ %val = load i32, ptr %buf.gep
%condition = icmp eq i32 %counter, 1
%counter.inc = add i32 %counter, 1
@@ -48,7 +48,7 @@ define void @infer.zext.0(i1* %c, i32 %start, i32* %buf) {
ret void
}
-define void @infer.sext.1(i32 %start, i1* %c) {
+define void @infer.sext.1(i32 %start, ptr %c) {
; CHECK-LABEL: Classifying expressions for: @infer.sext.1
entry:
%start.mul = mul i32 %start, 4
@@ -61,14 +61,14 @@ define void @infer.sext.1(i32 %start, i1* %c) {
; CHECK: %idx.sext = sext i32 %idx to i64
; CHECK-NEXT: --> {(2 + (sext i32 (4 * %start) to i64))<nuw><nsw>,+,2}<nsw><%loop>
%idx.inc = add nsw i32 %idx, 2
- %condition = load i1, i1* %c
+ %condition = load i1, ptr %c
br i1 %condition, label %exit, label %loop
exit:
ret void
}
-define void @infer.sext.2(i1* %c, i8 %start) {
+define void @infer.sext.2(ptr %c, i8 %start) {
; CHECK-LABEL: Classifying expressions for: @infer.sext.2
entry:
%start.inc = add i8 %start, 1
@@ -81,14 +81,14 @@ define void @infer.sext.2(i1* %c, i8 %start) {
; CHECK: %idx.sext = sext i8 %idx to i16
; CHECK-NEXT: --> {(1 + (sext i8 %start to i16))<nsw>,+,1}<nsw><%loop>
%idx.inc = add nsw i8 %idx, 1
- %condition = load volatile i1, i1* %c
+ %condition = load volatile i1, ptr %c
br i1 %condition, label %exit, label %loop
exit:
ret void
}
-define void @infer.zext.1(i1* %c, i8 %start) {
+define void @infer.zext.1(ptr %c, i8 %start) {
; CHECK-LABEL: Classifying expressions for: @infer.zext.1
entry:
%start.inc = add i8 %start, 1
@@ -101,7 +101,7 @@ define void @infer.zext.1(i1* %c, i8 %start) {
; CHECK: %idx.zext = zext i8 %idx to i16
; CHECK-NEXT: --> {(1 + (zext i8 %start to i16))<nuw><nsw>,+,1}<nuw><%loop>
%idx.inc = add nuw i8 %idx, 1
- %condition = load volatile i1, i1* %c
+ %condition = load volatile i1, ptr %c
br i1 %condition, label %exit, label %loop
exit:
diff --git a/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll b/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll
index 83292508c4825..9aa096b952be5 100644
--- a/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll
+++ b/llvm/test/Analysis/ScalarEvolution/infer-via-ranges.ll
@@ -1,6 +1,6 @@
; RUN: opt -passes=indvars -S < %s | FileCheck %s
-define void @infer_via_ranges(i32 *%arr, i32 %n) {
+define void @infer_via_ranges(ptr %arr, i32 %n) {
; CHECK-LABEL: @infer_via_ranges
entry:
%first.itr.check = icmp sgt i32 %n, 0
@@ -17,8 +17,8 @@ define void @infer_via_ranges(i32 *%arr, i32 %n) {
in.bounds:
; CHECK-LABEL: in.bounds:
- %addr = getelementptr i32, i32* %arr, i32 %idx
- store i32 0, i32* %addr
+ %addr = getelementptr i32, ptr %arr, i32 %idx
+ store i32 0, ptr %addr
%next = icmp sgt i32 %idx.dec, -1
br i1 %next, label %loop, label %exit
diff --git a/llvm/test/Analysis/ScalarEvolution/inner-loop-by-latch-cond-unknown.ll b/llvm/test/Analysis/ScalarEvolution/inner-loop-by-latch-cond-unknown.ll
index c0a6666f2e93c..fdcacaa7f1d03 100644
--- a/llvm/test/Analysis/ScalarEvolution/inner-loop-by-latch-cond-unknown.ll
+++ b/llvm/test/Analysis/ScalarEvolution/inner-loop-by-latch-cond-unknown.ll
@@ -5,7 +5,7 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:1"
target triple = "x86_64-unknown-linux-gnu"
-define void @test(i64 %a, i64* %p) {
+define void @test(i64 %a, ptr %p) {
entry:
br label %first_loop
@@ -16,7 +16,7 @@ first_loop:
br i1 %cond1, label %first_loop, label %middle_block
middle_block:
- %b = load i64, i64* %p
+ %b = load i64, ptr %p
%cmp = icmp ult i64 %i, %b
; When SCEV tries to compute the initial value for %j,
; it will observe the umax generated by this select.
diff --git a/llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll b/llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
index eb979f406551b..74d109eff754e 100644
--- a/llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
+++ b/llvm/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
@@ -1,9 +1,9 @@
; RUN: opt -disable-output "-passes=print<scalar-evolution>" < %s 2>&1 | FileCheck %s
-define i32 @slt_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) {
+define i32 @slt_trip_count_with_range(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: slt_trip_count_with_range
entry:
- %limit = load i32, i32* %ptr0, !range !0
+ %limit = load i32, ptr %ptr0, !range !0
br label %loop
loop:
@@ -17,10 +17,10 @@ define i32 @slt_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) {
ret i32 0
}
-define i32 @ult_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) {
+define i32 @ult_trip_count_with_range(ptr %ptr0, ptr %ptr1) {
; CHECK-LABEL: ult_trip_count_with_range
entry:
- %limit = load i32, i32* %ptr0, !range !0
+ %limit = load i32, ptr %ptr0, !range !0
br label %loop
loop:
diff --git a/llvm/test/Analysis/ScalarEvolution/lt-overflow.ll b/llvm/test/Analysis/ScalarEvolution/lt-overflow.ll
index 62a14c4525445..81d0444eb5b79 100644
--- a/llvm/test/Analysis/ScalarEvolution/lt-overflow.ll
+++ b/llvm/test/Analysis/ScalarEvolution/lt-overflow.ll
@@ -74,7 +74,7 @@ entry:
for.body:
%iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i32 %iv, 2
- store volatile i32 0, i32* @G
+ store volatile i32 0, ptr @G
%cmp = icmp ult i32 %iv.next, %N
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -89,7 +89,7 @@ entry:
for.body:
%iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i32 %iv, 2
- %val = load volatile i32, i32* @G
+ %val = load volatile i32, ptr @G
%cmp = icmp ult i32 %iv.next, %N
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -148,7 +148,7 @@ entry:
for.body:
%iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i32 %iv, 2
- %N = load i32, i32* @G
+ %N = load i32, ptr @G
%cmp = icmp ult i32 %iv.next, %N
br i1 %cmp, label %for.body, label %for.cond.cleanup
diff --git a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info-rewrite-expressions.ll b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info-rewrite-expressions.ll
index 0431b4b5dcb82..6b5075b4ac9bb 100644
--- a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info-rewrite-expressions.ll
+++ b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info-rewrite-expressions.ll
@@ -44,7 +44,7 @@ exit:
}
; Test case from PR40961.
-define i32 @rewrite_zext_min_max(i32 %N, i32* %arr) {
+define i32 @rewrite_zext_min_max(i32 %N, ptr %arr) {
; CHECK-LABEL: 'rewrite_zext_min_max'
; CHECK-NEXT: Classifying expressions for: @rewrite_zext_min_max
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %N, i32 16)
@@ -55,7 +55,7 @@ define i32 @rewrite_zext_min_max(i32 %N, i32* %arr) {
; CHECK-NEXT: --> (4 * ((zext i32 (16 umin %N) to i64) /u 4))<nuw><nsw> U: [0,17) S: [0,17)
; CHECK-NEXT: %index = phi i64 [ 0, %loop.ph ], [ %index.next, %loop ]
; CHECK-NEXT: --> {0,+,4}<nuw><%loop> U: [0,13) S: [0,13) Exits: (4 * ((-4 + (4 * ((zext i32 (16 umin %N) to i64) /u 4))<nuw><nsw>)<nsw> /u 4))<nuw> LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %gep = getelementptr inbounds i32, i32* %arr, i64 %index
+; CHECK-NEXT: %gep = getelementptr inbounds i32, ptr %arr, i64 %index
; CHECK-NEXT: --> {%arr,+,16}<nuw><%loop> U: full-set S: full-set Exits: ((16 * ((-4 + (4 * ((zext i32 (16 umin %N) to i64) /u 4))<nuw><nsw>)<nsw> /u 4)) + %arr) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %index.next = add nuw i64 %index, 4
; CHECK-NEXT: --> {4,+,4}<nuw><%loop> U: [4,17) S: [4,17) Exits: (4 + (4 * ((-4 + (4 * ((zext i32 (16 umin %N) to i64) /u 4))<nuw><nsw>)<nsw> /u 4))<nuw>) LoopDispositions: { %loop: Computable }
@@ -80,8 +80,8 @@ loop.ph:
; %n.vec is [4, 16) and a multiple of 4.
loop:
%index = phi i64 [ 0, %loop.ph ], [ %index.next, %loop ]
- %gep = getelementptr inbounds i32, i32* %arr, i64 %index
- store i32 0, i32* %gep
+ %gep = getelementptr inbounds i32, ptr %arr, i64 %index
+ store i32 0, ptr %gep
%index.next = add nuw i64 %index, 4
%ec = icmp eq i64 %index.next, %n.vec
br i1 %ec, label %exit, label %loop
diff --git a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll
index ceefa2db3f050..0a6b3f9e77543 100644
--- a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll
+++ b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll
@@ -3,12 +3,12 @@
; Test case for PR40961. The loop guard limits the constant max backedge-taken count.
-define void @test_guard_less_than_16(i32* nocapture %a, i64 %i) {
+define void @test_guard_less_than_16(ptr nocapture %a, i64 %i) {
; CHECK-LABEL: 'test_guard_less_than_16'
; CHECK-NEXT: Classifying expressions for: @test_guard_less_than_16
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
; CHECK-NEXT: --> {%i,+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 15 LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {((4 * %i) + %a),+,4}<nw><%loop> U: full-set S: full-set Exits: (60 + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {(1 + %i),+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 16 LoopDispositions: { %loop: Computable }
@@ -26,8 +26,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 16
br i1 %exitcond, label %exit, label %loop
@@ -36,12 +36,12 @@ exit:
ret void
}
-define void @test_guard_less_than_16_operands_swapped(i32* nocapture %a, i64 %i) {
+define void @test_guard_less_than_16_operands_swapped(ptr nocapture %a, i64 %i) {
; CHECK-LABEL: 'test_guard_less_than_16_operands_swapped'
; CHECK-NEXT: Classifying expressions for: @test_guard_less_than_16_operands_swapped
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
; CHECK-NEXT: --> {%i,+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 15 LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {((4 * %i) + %a),+,4}<nw><%loop> U: full-set S: full-set Exits: (60 + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {(1 + %i),+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 16 LoopDispositions: { %loop: Computable }
@@ -59,8 +59,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 16
br i1 %exitcond, label %exit, label %loop
@@ -69,12 +69,12 @@ exit:
ret void
}
-define void @test_guard_less_than_16_branches_flipped(i32* nocapture %a, i64 %i) {
+define void @test_guard_less_than_16_branches_flipped(ptr nocapture %a, i64 %i) {
; CHECK-LABEL: 'test_guard_less_than_16_branches_flipped'
; CHECK-NEXT: Classifying expressions for: @test_guard_less_than_16_branches_flipped
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
; CHECK-NEXT: --> {%i,+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 15 LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {((4 * %i) + %a),+,4}<nw><%loop> U: full-set S: full-set Exits: (60 + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {(1 + %i),+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 16 LoopDispositions: { %loop: Computable }
@@ -92,8 +92,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 16
br i1 %exitcond, label %exit, label %loop
@@ -102,12 +102,12 @@ exit:
ret void
}
-define void @test_guard_uge_16_branches_flipped(i32* nocapture %a, i64 %i) {
+define void @test_guard_uge_16_branches_flipped(ptr nocapture %a, i64 %i) {
; CHECK-LABEL: 'test_guard_uge_16_branches_flipped'
; CHECK-NEXT: Classifying expressions for: @test_guard_uge_16_branches_flipped
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
; CHECK-NEXT: --> {%i,+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 15 LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {((4 * %i) + %a),+,4}<nw><%loop> U: full-set S: full-set Exits: (60 + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {(1 + %i),+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 16 LoopDispositions: { %loop: Computable }
@@ -125,8 +125,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 16
br i1 %exitcond, label %exit, label %loop
@@ -135,12 +135,12 @@ exit:
ret void
}
-define void @test_guard_eq_12(i32* nocapture %a, i64 %N) {
+define void @test_guard_eq_12(ptr nocapture %a, i64 %N) {
; CHECK-LABEL: 'test_guard_eq_12'
; CHECK-NEXT: Classifying expressions for: @test_guard_eq_12
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,13) S: [0,13) Exits: %N LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: ((4 * %N) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,14) S: [1,14) Exits: (1 + %N) LoopDispositions: { %loop: Computable }
@@ -158,8 +158,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv, %N
br i1 %exitcond, label %exit, label %loop
@@ -168,12 +168,12 @@ exit:
ret void
}
-define void @test_guard_ule_12(i32* nocapture %a, i64 %N) {
+define void @test_guard_ule_12(ptr nocapture %a, i64 %N) {
; CHECK-LABEL: 'test_guard_ule_12'
; CHECK-NEXT: Classifying expressions for: @test_guard_ule_12
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,13) S: [0,13) Exits: %N LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: ((4 * %N) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,14) S: [1,14) Exits: (1 + %N) LoopDispositions: { %loop: Computable }
@@ -191,8 +191,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv, %N
br i1 %exitcond, label %exit, label %loop
@@ -201,12 +201,12 @@ exit:
ret void
}
-define void @test_guard_ule_12_step2(i32* nocapture %a, i64 %N) {
+define void @test_guard_ule_12_step2(ptr nocapture %a, i64 %N) {
; CHECK-LABEL: 'test_guard_ule_12_step2'
; CHECK-NEXT: Classifying expressions for: @test_guard_ule_12_step2
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%loop> U: [0,13) S: [0,13) Exits: (2 * (%N /u 2))<nuw> LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,8}<nuw><%loop> U: full-set S: full-set Exits: ((8 * (%N /u 2)) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 2
; CHECK-NEXT: --> {2,+,2}<nuw><nsw><%loop> U: [2,15) S: [2,15) Exits: (2 + (2 * (%N /u 2))<nuw>) LoopDispositions: { %loop: Computable }
@@ -224,8 +224,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 2
%exitcond = icmp eq i64 %iv, %N
br i1 %exitcond, label %exit, label %loop
@@ -234,12 +234,12 @@ exit:
ret void
}
-define void @test_multiple_const_guards_order1(i32* nocapture %a, i64 %i) {
+define void @test_multiple_const_guards_order1(ptr nocapture %a, i64 %i) {
; CHECK-LABEL: 'test_multiple_const_guards_order1'
; CHECK-NEXT: Classifying expressions for: @test_multiple_const_guards_order1
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,10) S: [0,10) Exits: %i LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: ((4 * %i) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,11) S: [1,11) Exits: (1 + %i) LoopDispositions: { %loop: Computable }
@@ -261,8 +261,8 @@ guardbb:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv, %i
br i1 %exitcond, label %exit, label %loop
@@ -271,12 +271,12 @@ exit:
ret void
}
-define void @test_multiple_const_guards_order2(i32* nocapture %a, i64 %i) {
+define void @test_multiple_const_guards_order2(ptr nocapture %a, i64 %i) {
; CHECK-LABEL: 'test_multiple_const_guards_order2'
; CHECK-NEXT: Classifying expressions for: @test_multiple_const_guards_order2
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,10) S: [0,10) Exits: %i LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: ((4 * %i) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,11) S: [1,11) Exits: (1 + %i) LoopDispositions: { %loop: Computable }
@@ -298,8 +298,8 @@ guardbb:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv, %i
br i1 %exitcond, label %exit, label %loop
@@ -309,12 +309,12 @@ exit:
}
; TODO: Currently we miss getting the tightest constant max backedge-taken count (11).
-define void @test_multiple_var_guards_order1(i32* nocapture %a, i64 %i, i64 %N) {
+define void @test_multiple_var_guards_order1(ptr nocapture %a, i64 %i, i64 %N) {
; CHECK-LABEL: 'test_multiple_var_guards_order1'
; CHECK-NEXT: Classifying expressions for: @test_multiple_var_guards_order1
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,-9223372036854775808) S: [0,-9223372036854775808) Exits: %i LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: ((4 * %i) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: (1 + %i) LoopDispositions: { %loop: Computable }
@@ -336,8 +336,8 @@ guardbb:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv, %i
br i1 %exitcond, label %exit, label %loop
@@ -347,12 +347,12 @@ exit:
}
; TODO: Currently we miss getting the tightest constant max backedge-taken count (11).
-define void @test_multiple_var_guards_order2(i32* nocapture %a, i64 %i, i64 %N) {
+define void @test_multiple_var_guards_order2(ptr nocapture %a, i64 %i, i64 %N) {
; CHECK-LABEL: 'test_multiple_var_guards_order2'
; CHECK-NEXT: Classifying expressions for: @test_multiple_var_guards_order2
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,-9223372036854775808) S: [0,-9223372036854775808) Exits: %i LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: ((4 * %i) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: (1 + %i) LoopDispositions: { %loop: Computable }
@@ -374,8 +374,8 @@ guardbb:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv, %i
br i1 %exitcond, label %exit, label %loop
@@ -385,12 +385,12 @@ exit:
}
; The guards here reference each other in a cycle.
-define void @test_multiple_var_guards_cycle(i32* nocapture %a, i64 %i, i64 %N) {
+define void @test_multiple_var_guards_cycle(ptr nocapture %a, i64 %i, i64 %N) {
; CHECK-LABEL: 'test_multiple_var_guards_cycle'
; CHECK-NEXT: Classifying expressions for: @test_multiple_var_guards_cycle
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,-9223372036854775808) S: [0,-9223372036854775808) Exits: %N LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: ((4 * %N) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: (1 + %N) LoopDispositions: { %loop: Computable }
@@ -412,8 +412,8 @@ guardbb:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv, %N
br i1 %exitcond, label %exit, label %loop
@@ -422,12 +422,12 @@ exit:
ret void
}
-define void @test_guard_ult_ne(i32* nocapture readonly %data, i64 %count) {
+define void @test_guard_ult_ne(ptr nocapture readonly %data, i64 %count) {
; CHECK-LABEL: 'test_guard_ult_ne'
; CHECK-NEXT: Classifying expressions for: @test_guard_ult_ne
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: [0,4) S: [0,4) Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,5) S: [1,5) Exits: %count LoopDispositions: { %loop: Computable }
@@ -449,8 +449,8 @@ guardbb:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -459,12 +459,12 @@ exit:
ret void
}
-define void @test_guard_ne_ult(i32* nocapture readonly %data, i64 %count) {
+define void @test_guard_ne_ult(ptr nocapture readonly %data, i64 %count) {
; CHECK-LABEL: 'test_guard_ne_ult'
; CHECK-NEXT: Classifying expressions for: @test_guard_ne_ult
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: [0,4) S: [0,4) Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,5) S: [1,5) Exits: %count LoopDispositions: { %loop: Computable }
@@ -486,8 +486,8 @@ guardbb:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %guardbb ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -496,14 +496,14 @@ exit:
ret void
}
-define void @test_guard_if_and_enter(i32* nocapture readonly %data, i64 %count) {
+define void @test_guard_if_and_enter(ptr nocapture readonly %data, i64 %count) {
; CHECK-LABEL: 'test_guard_if_and_enter'
; CHECK-NEXT: Classifying expressions for: @test_guard_if_and_enter
; CHECK-NEXT: %cmp.and = and i1 %cmp.ult, %cmp.ne
; CHECK-NEXT: --> (%cmp.ult umin %cmp.ne) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: [0,4) S: [0,4) Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,5) S: [1,5) Exits: %count LoopDispositions: { %loop: Computable }
@@ -523,8 +523,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -533,14 +533,14 @@ exit:
ret void
}
-define void @test_guard_if_and_skip(i32* nocapture readonly %data, i64 %count) {
+define void @test_guard_if_and_skip(ptr nocapture readonly %data, i64 %count) {
; CHECK-LABEL: 'test_guard_if_and_skip'
; CHECK-NEXT: Classifying expressions for: @test_guard_if_and_skip
; CHECK-NEXT: %cmp.and = and i1 %cmp.ult, %cmp.ne
; CHECK-NEXT: --> (%cmp.ult umin %cmp.ne) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: full-set S: full-set Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: %count LoopDispositions: { %loop: Computable }
@@ -560,8 +560,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -570,7 +570,7 @@ exit:
ret void
}
-define void @test_guard_if_and_and(i32* nocapture readonly %data, i64 %count, i1 %c) {
+define void @test_guard_if_and_and(ptr nocapture readonly %data, i64 %count, i1 %c) {
; CHECK-LABEL: 'test_guard_if_and_and'
; CHECK-NEXT: Classifying expressions for: @test_guard_if_and_and
; CHECK-NEXT: %cmp.and1 = and i1 %c, %cmp.ne
@@ -579,7 +579,7 @@ define void @test_guard_if_and_and(i32* nocapture readonly %data, i64 %count, i1
; CHECK-NEXT: --> (%c umin %cmp.ult umin %cmp.ne) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: [0,4) S: [0,4) Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,5) S: [1,5) Exits: %count LoopDispositions: { %loop: Computable }
@@ -600,8 +600,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -610,7 +610,7 @@ exit:
ret void
}
-define void @test_guard_if_and_or(i32* nocapture readonly %data, i64 %count, i1 %c) {
+define void @test_guard_if_and_or(ptr nocapture readonly %data, i64 %count, i1 %c) {
; CHECK-LABEL: 'test_guard_if_and_or'
; CHECK-NEXT: Classifying expressions for: @test_guard_if_and_or
; CHECK-NEXT: %cmp.or = or i1 %c, %cmp.ne
@@ -619,7 +619,7 @@ define void @test_guard_if_and_or(i32* nocapture readonly %data, i64 %count, i1
; CHECK-NEXT: --> ((%c umax %cmp.ne) umin %cmp.ult) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: full-set S: full-set Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: %count LoopDispositions: { %loop: Computable }
@@ -640,8 +640,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -650,14 +650,14 @@ exit:
ret void
}
-define void @test_guard_if_or_skip(i32* nocapture readonly %data, i64 %count) {
+define void @test_guard_if_or_skip(ptr nocapture readonly %data, i64 %count) {
; CHECK-LABEL: 'test_guard_if_or_skip'
; CHECK-NEXT: Classifying expressions for: @test_guard_if_or_skip
; CHECK-NEXT: %cmp.or = or i1 %cmp.uge, %cmp.eq
; CHECK-NEXT: --> (%cmp.uge umax %cmp.eq) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: [0,4) S: [0,4) Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,5) S: [1,5) Exits: %count LoopDispositions: { %loop: Computable }
@@ -677,8 +677,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -687,14 +687,14 @@ exit:
ret void
}
-define void @test_guard_if_or_enter(i32* nocapture readonly %data, i64 %count) {
+define void @test_guard_if_or_enter(ptr nocapture readonly %data, i64 %count) {
; CHECK-LABEL: 'test_guard_if_or_enter'
; CHECK-NEXT: Classifying expressions for: @test_guard_if_or_enter
; CHECK-NEXT: %cmp.or = or i1 %cmp.uge, %cmp.eq
; CHECK-NEXT: --> (%cmp.uge umax %cmp.eq) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: full-set S: full-set Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: %count LoopDispositions: { %loop: Computable }
@@ -714,8 +714,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -724,7 +724,7 @@ exit:
ret void
}
-define void @test_guard_if_or_or(i32* nocapture readonly %data, i64 %count, i1 %c) {
+define void @test_guard_if_or_or(ptr nocapture readonly %data, i64 %count, i1 %c) {
; CHECK-LABEL: 'test_guard_if_or_or'
; CHECK-NEXT: Classifying expressions for: @test_guard_if_or_or
; CHECK-NEXT: %cmp.or1 = or i1 %c, %cmp.eq
@@ -733,7 +733,7 @@ define void @test_guard_if_or_or(i32* nocapture readonly %data, i64 %count, i1 %
; CHECK-NEXT: --> (%c umax %cmp.uge umax %cmp.eq) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: [0,4) S: [0,4) Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,5) S: [1,5) Exits: %count LoopDispositions: { %loop: Computable }
@@ -754,8 +754,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -764,7 +764,7 @@ exit:
ret void
}
-define void @test_guard_if_or_and(i32* nocapture readonly %data, i64 %count, i1 %c) {
+define void @test_guard_if_or_and(ptr nocapture readonly %data, i64 %count, i1 %c) {
; CHECK-LABEL: 'test_guard_if_or_and'
; CHECK-NEXT: Classifying expressions for: @test_guard_if_or_and
; CHECK-NEXT: %cmp.and = and i1 %c, %cmp.eq
@@ -773,7 +773,7 @@ define void @test_guard_if_or_and(i32* nocapture readonly %data, i64 %count, i1
; CHECK-NEXT: --> ((%c umin %cmp.eq) umax %cmp.uge) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: full-set S: full-set Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: %count LoopDispositions: { %loop: Computable }
@@ -794,8 +794,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -807,12 +807,12 @@ exit:
; Test case for PR47247. Both the guard condition and the assume limit the
; constant max backedge-taken count.
-define void @test_guard_and_assume(i32* nocapture readonly %data, i64 %count) {
+define void @test_guard_and_assume(ptr nocapture readonly %data, i64 %count) {
; CHECK-LABEL: 'test_guard_and_assume'
; CHECK-NEXT: Classifying expressions for: @test_guard_and_assume
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: [0,4) S: [0,4) Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,5) S: [1,5) Exits: %count LoopDispositions: { %loop: Computable }
@@ -832,8 +832,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -842,14 +842,14 @@ exit:
ret void
}
-define void @test_guard_assume_and(i32* nocapture readonly %data, i64 %count) {
+define void @test_guard_assume_and(ptr nocapture readonly %data, i64 %count) {
; CHECK-LABEL: 'test_guard_assume_and'
; CHECK-NEXT: Classifying expressions for: @test_guard_assume_and
; CHECK-NEXT: %cmp.and = and i1 %cmp.ult, %cmp.ne
; CHECK-NEXT: --> (%cmp.ult umin %cmp.ne) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
; CHECK-NEXT: --> {0,+,1}<nuw><%loop> U: [0,4) S: [0,4) Exits: (-1 + %count) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %data, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %data, i64 %iv
; CHECK-NEXT: --> {%data,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %count) + %data) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,5) S: [1,5) Exits: %count LoopDispositions: { %loop: Computable }
@@ -870,8 +870,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
- %idx = getelementptr inbounds i32, i32* %data, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %data, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw i64 %iv, 1
%exitcond.not = icmp eq i64 %iv.next, %count
br i1 %exitcond.not, label %exit, label %loop
@@ -964,22 +964,22 @@ loop:
exit:
ret void
}
-define void @crash(i8* %ptr) {
+define void @crash(ptr %ptr) {
; CHECK-LABEL: 'crash'
; CHECK-NEXT: Classifying expressions for: @crash
-; CHECK-NEXT: %text.addr.5 = phi i8* [ %incdec.ptr112, %while.cond111 ], [ null, %while.body ]
+; CHECK-NEXT: %text.addr.5 = phi ptr [ %incdec.ptr112, %while.cond111 ], [ null, %while.body ]
; CHECK-NEXT: --> {null,+,-1}<nw><%while.cond111> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %while.cond111: Computable, %while.body: Variant }
-; CHECK-NEXT: %incdec.ptr112 = getelementptr inbounds i8, i8* %text.addr.5, i64 -1
+; CHECK-NEXT: %incdec.ptr112 = getelementptr inbounds i8, ptr %text.addr.5, i64 -1
; CHECK-NEXT: --> {(-1 + null)<nuw><nsw>,+,-1}<nw><%while.cond111> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %while.cond111: Computable, %while.body: Variant }
-; CHECK-NEXT: %lastout.2271 = phi i8* [ %incdec.ptr126, %while.body125 ], [ %ptr, %while.end117 ]
-; CHECK-NEXT: --> {%ptr,+,1}<nuw><%while.body125> U: full-set S: full-set Exits: {(-2 + (-1 * (ptrtoint i8* %ptr to i64)) + %ptr),+,-1}<nw><%while.cond111> LoopDispositions: { %while.body125: Computable }
-; CHECK-NEXT: %incdec.ptr126 = getelementptr inbounds i8, i8* %lastout.2271, i64 1
-; CHECK-NEXT: --> {(1 + %ptr),+,1}<nuw><%while.body125> U: full-set S: full-set Exits: {(-1 + (-1 * (ptrtoint i8* %ptr to i64)) + %ptr),+,-1}<nw><%while.cond111> LoopDispositions: { %while.body125: Computable }
+; CHECK-NEXT: %lastout.2271 = phi ptr [ %incdec.ptr126, %while.body125 ], [ %ptr, %while.end117 ]
+; CHECK-NEXT: --> {%ptr,+,1}<nuw><%while.body125> U: full-set S: full-set Exits: {(-2 + (-1 * (ptrtoint ptr %ptr to i64)) + %ptr),+,-1}<nw><%while.cond111> LoopDispositions: { %while.body125: Computable }
+; CHECK-NEXT: %incdec.ptr126 = getelementptr inbounds i8, ptr %lastout.2271, i64 1
+; CHECK-NEXT: --> {(1 + %ptr),+,1}<nuw><%while.body125> U: full-set S: full-set Exits: {(-1 + (-1 * (ptrtoint ptr %ptr to i64)) + %ptr),+,-1}<nw><%while.cond111> LoopDispositions: { %while.body125: Computable }
; CHECK-NEXT: Determining loop execution counts for: @crash
-; CHECK-NEXT: Loop %while.body125: backedge-taken count is {(-2 + (-1 * (ptrtoint i8* %ptr to i64))),+,-1}<nw><%while.cond111>
+; CHECK-NEXT: Loop %while.body125: backedge-taken count is {(-2 + (-1 * (ptrtoint ptr %ptr to i64))),+,-1}<nw><%while.cond111>
; CHECK-NEXT: Loop %while.body125: constant max backedge-taken count is -2
-; CHECK-NEXT: Loop %while.body125: symbolic max backedge-taken count is {(-2 + (-1 * (ptrtoint i8* %ptr to i64))),+,-1}<nw><%while.cond111>
-; CHECK-NEXT: Loop %while.body125: Predicated backedge-taken count is {(-2 + (-1 * (ptrtoint i8* %ptr to i64))),+,-1}<nw><%while.cond111>
+; CHECK-NEXT: Loop %while.body125: symbolic max backedge-taken count is {(-2 + (-1 * (ptrtoint ptr %ptr to i64))),+,-1}<nw><%while.cond111>
+; CHECK-NEXT: Loop %while.body125: Predicated backedge-taken count is {(-2 + (-1 * (ptrtoint ptr %ptr to i64))),+,-1}<nw><%while.cond111>
; CHECK-NEXT: Predicates:
; CHECK: Loop %while.body125: Trip multiple is 1
; CHECK-NEXT: Loop %while.cond111: Unpredictable backedge-taken count.
@@ -998,12 +998,12 @@ while.body:
br label %while.cond111
while.cond111:
- %text.addr.5 = phi i8* [ %incdec.ptr112, %while.cond111 ], [ null, %while.body ]
- %incdec.ptr112 = getelementptr inbounds i8, i8* %text.addr.5, i64 -1
+ %text.addr.5 = phi ptr [ %incdec.ptr112, %while.cond111 ], [ null, %while.body ]
+ %incdec.ptr112 = getelementptr inbounds i8, ptr %text.addr.5, i64 -1
br i1 false, label %while.end117, label %while.cond111
while.end117:
- %cmp118 = icmp ult i8* %ptr, %incdec.ptr112
+ %cmp118 = icmp ult ptr %ptr, %incdec.ptr112
br i1 %cmp118, label %while.body125, label %while.cond134.preheader
@@ -1011,9 +1011,9 @@ while.cond134.preheader:
br label %while.body
while.body125:
- %lastout.2271 = phi i8* [ %incdec.ptr126, %while.body125 ], [ %ptr, %while.end117 ]
- %incdec.ptr126 = getelementptr inbounds i8, i8* %lastout.2271, i64 1
- %exitcond.not = icmp eq i8* %incdec.ptr126, %incdec.ptr112
+ %lastout.2271 = phi ptr [ %incdec.ptr126, %while.body125 ], [ %ptr, %while.end117 ]
+ %incdec.ptr126 = getelementptr inbounds i8, ptr %lastout.2271, i64 1
+ %exitcond.not = icmp eq ptr %incdec.ptr126, %incdec.ptr112
br i1 %exitcond.not, label %while.end129, label %while.body125
while.end129: ; preds = %while.body125
@@ -1176,14 +1176,14 @@ while.end:
ret void
}
-define void @test_guard_slt_sgt_1(i32* nocapture %a, i64 %N) {
+define void @test_guard_slt_sgt_1(ptr nocapture %a, i64 %N) {
; CHECK-LABEL: 'test_guard_slt_sgt_1'
; CHECK-NEXT: Classifying expressions for: @test_guard_slt_sgt_1
; CHECK-NEXT: %and = and i1 %c.0, %c.1
; CHECK-NEXT: --> (%c.0 umin %c.1) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,11) S: [0,11) Exits: (-1 + %N) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %N) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,12) S: [1,12) Exits: %N LoopDispositions: { %loop: Computable }
@@ -1203,8 +1203,8 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %N
br i1 %exitcond, label %exit, label %loop
@@ -1213,14 +1213,14 @@ exit:
ret void
}
-define void @test_guard_slt_sgt_2(i32* nocapture %a, i64 %i) {
+define void @test_guard_slt_sgt_2(ptr nocapture %a, i64 %i) {
; CHECK-LABEL: 'test_guard_slt_sgt_2'
; CHECK-NEXT: Classifying expressions for: @test_guard_slt_sgt_2
; CHECK-NEXT: %and = and i1 %c.0, %c.1
; CHECK-NEXT: --> (%c.0 umin %c.1) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
; CHECK-NEXT: --> {%i,+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 17 LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {((4 * %i) + %a),+,4}<nw><%loop> U: full-set S: full-set Exits: (68 + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {(1 + %i),+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 18 LoopDispositions: { %loop: Computable }
@@ -1240,8 +1240,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 18
br i1 %exitcond, label %exit, label %loop
@@ -1250,14 +1250,14 @@ exit:
ret void
}
-define void @test_guard_sle_sge_1(i32* nocapture %a, i64 %N) {
+define void @test_guard_sle_sge_1(ptr nocapture %a, i64 %N) {
; CHECK-LABEL: 'test_guard_sle_sge_1'
; CHECK-NEXT: Classifying expressions for: @test_guard_sle_sge_1
; CHECK-NEXT: %and = and i1 %c.0, %c.1
; CHECK-NEXT: --> (%c.0 umin %c.1) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,12) S: [0,12) Exits: (-1 + %N) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {%a,+,4}<nuw><%loop> U: full-set S: full-set Exits: (-4 + (4 * %N) + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,13) S: [1,13) Exits: %N LoopDispositions: { %loop: Computable }
@@ -1277,8 +1277,8 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %N
br i1 %exitcond, label %exit, label %loop
@@ -1287,14 +1287,14 @@ exit:
ret void
}
-define void @test_guard_sle_sge_2(i32* nocapture %a, i64 %i) {
+define void @test_guard_sle_sge_2(ptr nocapture %a, i64 %i) {
; CHECK-LABEL: 'test_guard_sle_sge_2'
; CHECK-NEXT: Classifying expressions for: @test_guard_sle_sge_2
; CHECK-NEXT: %and = and i1 %c.0, %c.1
; CHECK-NEXT: --> (%c.0 umin %c.1) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
; CHECK-NEXT: --> {%i,+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 17 LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %idx = getelementptr inbounds i32, i32* %a, i64 %iv
+; CHECK-NEXT: %idx = getelementptr inbounds i32, ptr %a, i64 %iv
; CHECK-NEXT: --> {((4 * %i) + %a),+,4}<nw><%loop> U: full-set S: full-set Exits: (68 + %a) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i64 %iv, 1
; CHECK-NEXT: --> {(1 + %i),+,1}<nuw><nsw><%loop> U: full-set S: full-set Exits: 18 LoopDispositions: { %loop: Computable }
@@ -1314,8 +1314,8 @@ entry:
loop:
%iv = phi i64 [ %iv.next, %loop ], [ %i, %entry ]
- %idx = getelementptr inbounds i32, i32* %a, i64 %iv
- store i32 1, i32* %idx, align 4
+ %idx = getelementptr inbounds i32, ptr %a, i64 %iv
+ store i32 1, ptr %idx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, 18
br i1 %exitcond, label %exit, label %loop
@@ -1327,14 +1327,14 @@ exit:
; The function below uses a single condition to ensure %N > 0 && %N < 8.
; InstCombine transforms such checks with 2 conditions to a single check as in
; the test function.
-define void @optimized_range_check_unsigned(i16* %pred, i32 %N) {
+define void @optimized_range_check_unsigned(ptr %pred, i32 %N) {
; CHECK-LABEL: 'optimized_range_check_unsigned'
; CHECK-NEXT: Classifying expressions for: @optimized_range_check_unsigned
; CHECK-NEXT: %N.off = add i32 %N, -1
; CHECK-NEXT: --> (-1 + %N) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,7) S: [0,7) Exits: (-1 + %N) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
+; CHECK-NEXT: %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
; CHECK-NEXT: --> {%pred,+,2}<nuw><%loop> U: full-set S: full-set Exits: ((2 * (zext i32 (-1 + %N) to i64))<nuw><nsw> + %pred) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i32 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,8) S: [1,8) Exits: %N LoopDispositions: { %loop: Computable }
@@ -1353,8 +1353,8 @@ entry:
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
- store i16 0, i16* %gep, align 2
+ %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
+ store i16 0, ptr %gep, align 2
%iv.next = add nuw nsw i32 %iv, 1
%ec = icmp eq i32 %iv.next, %N
br i1 %ec, label %exit, label %loop
@@ -1364,14 +1364,14 @@ exit:
}
; Same as @optimized_range_check_unsigned, but with the icmp operands swapped.
-define void @optimized_range_check_unsigned_icmp_ops_swapped(i16* %pred, i32 %N) {
+define void @optimized_range_check_unsigned_icmp_ops_swapped(ptr %pred, i32 %N) {
; CHECK-LABEL: 'optimized_range_check_unsigned_icmp_ops_swapped'
; CHECK-NEXT: Classifying expressions for: @optimized_range_check_unsigned_icmp_ops_swapped
; CHECK-NEXT: %N.off = add i32 %N, -1
; CHECK-NEXT: --> (-1 + %N) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,7) S: [0,7) Exits: (-1 + %N) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
+; CHECK-NEXT: %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
; CHECK-NEXT: --> {%pred,+,2}<nuw><%loop> U: full-set S: full-set Exits: ((2 * (zext i32 (-1 + %N) to i64))<nuw><nsw> + %pred) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i32 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,8) S: [1,8) Exits: %N LoopDispositions: { %loop: Computable }
@@ -1390,8 +1390,8 @@ entry:
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
- store i16 0, i16* %gep, align 2
+ %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
+ store i16 0, ptr %gep, align 2
%iv.next = add nuw nsw i32 %iv, 1
%ec = icmp eq i32 %iv.next, %N
br i1 %ec, label %exit, label %loop
@@ -1403,14 +1403,14 @@ exit:
; The function below uses a single condition to ensure %N > 2 && %N < 22.
; InstCombine transforms such checks with 2 conditions to a single check as in
; the test function.
-define void @optimized_range_check_unsigned2(i16* %pred, i32 %N) {
+define void @optimized_range_check_unsigned2(ptr %pred, i32 %N) {
; CHECK-LABEL: 'optimized_range_check_unsigned2'
; CHECK-NEXT: Classifying expressions for: @optimized_range_check_unsigned2
; CHECK-NEXT: %N.off = add i32 %N, -2
; CHECK-NEXT: --> (-2 + %N) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,21) S: [0,21) Exits: (-1 + %N) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
+; CHECK-NEXT: %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
; CHECK-NEXT: --> {%pred,+,2}<nuw><%loop> U: full-set S: full-set Exits: ((2 * (zext i32 (-1 + %N) to i64))<nuw><nsw> + %pred) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i32 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,22) S: [1,22) Exits: %N LoopDispositions: { %loop: Computable }
@@ -1429,8 +1429,8 @@ entry:
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
- store i16 0, i16* %gep, align 2
+ %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
+ store i16 0, ptr %gep, align 2
%iv.next = add nuw nsw i32 %iv, 1
%ec = icmp eq i32 %iv.next, %N
br i1 %ec, label %exit, label %loop
@@ -1441,7 +1441,7 @@ exit:
; Same as @optimized_range_check_unsigned, but %N already has a range limited
; to [2,4) beforehand.
-define void @optimized_range_check_unsigned3(i16* %pred, i1 %c) {
+define void @optimized_range_check_unsigned3(ptr %pred, i1 %c) {
; CHECK-LABEL: 'optimized_range_check_unsigned3'
; CHECK-NEXT: Classifying expressions for: @optimized_range_check_unsigned3
; CHECK-NEXT: %N = select i1 %c, i32 2, i32 3
@@ -1450,7 +1450,7 @@ define void @optimized_range_check_unsigned3(i16* %pred, i1 %c) {
; CHECK-NEXT: --> (-1 + %N)<nsw> U: [1,3) S: [1,3)
; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,3) S: [0,3) Exits: (-1 + %N)<nsw> LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
+; CHECK-NEXT: %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
; CHECK-NEXT: --> {%pred,+,2}<nuw><%loop> U: full-set S: full-set Exits: ((2 * (zext i32 (-1 + %N)<nsw> to i64))<nuw><nsw> + %pred) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i32 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,4) S: [1,4) Exits: %N LoopDispositions: { %loop: Computable }
@@ -1470,8 +1470,8 @@ entry:
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
- store i16 0, i16* %gep, align 2
+ %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
+ store i16 0, ptr %gep, align 2
%iv.next = add nuw nsw i32 %iv, 1
%ec = icmp eq i32 %iv.next, %N
br i1 %ec, label %exit, label %loop
@@ -1482,14 +1482,14 @@ exit:
; Similar to @optimized_range_check_unsigned, but the initial compare checks
; against unsigned max (-1), which breaks the range check idiom.
-define void @not_optimized_range_check_unsigned1(i16* %pred, i32 %N) {
+define void @not_optimized_range_check_unsigned1(ptr %pred, i32 %N) {
; CHECK-LABEL: 'not_optimized_range_check_unsigned1'
; CHECK-NEXT: Classifying expressions for: @not_optimized_range_check_unsigned1
; CHECK-NEXT: %N.off = add i32 %N, -1
; CHECK-NEXT: --> (-1 + %N) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,-2147483648) S: [0,-2147483648) Exits: (-1 + %N) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
+; CHECK-NEXT: %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
; CHECK-NEXT: --> {%pred,+,2}<nuw><%loop> U: full-set S: full-set Exits: ((2 * (zext i32 (-1 + %N) to i64))<nuw><nsw> + %pred) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i32 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: %N LoopDispositions: { %loop: Computable }
@@ -1508,8 +1508,8 @@ entry:
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
- store i16 0, i16* %gep, align 2
+ %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
+ store i16 0, ptr %gep, align 2
%iv.next = add nuw nsw i32 %iv, 1
%ec = icmp eq i32 %iv.next, %N
br i1 %ec, label %exit, label %loop
@@ -1520,14 +1520,14 @@ exit:
; Similar to @optimized_range_check_unsigned, but the initial compare checks
; against 0, which breaks the range check idiom.
-define void @not_optimized_range_check_unsigned2(i16* %pred, i32 %N) {
+define void @not_optimized_range_check_unsigned2(ptr %pred, i32 %N) {
; CHECK-LABEL: 'not_optimized_range_check_unsigned2'
; CHECK-NEXT: Classifying expressions for: @not_optimized_range_check_unsigned2
; CHECK-NEXT: %N.off = add i32 %N, -1
; CHECK-NEXT: --> (-1 + %N) U: full-set S: full-set
; CHECK-NEXT: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop> U: [0,-2147483648) S: [0,-2147483648) Exits: (-1 + %N) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
+; CHECK-NEXT: %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
; CHECK-NEXT: --> {%pred,+,2}<nuw><%loop> U: full-set S: full-set Exits: ((2 * (zext i32 (-1 + %N) to i64))<nuw><nsw> + %pred) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %iv.next = add nuw nsw i32 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: %N LoopDispositions: { %loop: Computable }
@@ -1546,8 +1546,8 @@ entry:
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr inbounds i16, i16* %pred, i32 %iv
- store i16 0, i16* %gep, align 2
+ %gep = getelementptr inbounds i16, ptr %pred, i32 %iv
+ store i16 0, ptr %gep, align 2
%iv.next = add nuw nsw i32 %iv, 1
%ec = icmp eq i32 %iv.next, %N
br i1 %ec, label %exit, label %loop
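
For context on the @optimized_range_check_unsigned tests in the file above: the single guard they rely on is the InstCombine-folded form of a two-compare range check. A minimal standalone sketch of the equivalence, using a hypothetical @range_check_equivalence helper that is not part of this patch:

  define i1 @range_check_equivalence(i32 %N) {
    ; two-compare form of 0 < %N && %N < 8
    %gt = icmp sgt i32 %N, 0
    %lt = icmp slt i32 %N, 8
    %two = and i1 %gt, %lt
    ; single-compare form used by the tests: for any %N outside (0, 8),
    ; %N - 1 is unsigned-greater-or-equal 7, so one ult covers both bounds
    %N.off = add i32 %N, -1
    %one = icmp ult i32 %N.off, 7
    ; the two forms agree for every %N
    %same = icmp eq i1 %two, %one
    ret i1 %same
  }
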
diff --git a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-limit-by-wrapping.ll b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-limit-by-wrapping.ll
index 45978ee35c5b0..90a8b02ab7b68 100644
--- a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-limit-by-wrapping.ll
+++ b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-limit-by-wrapping.ll
@@ -4,7 +4,7 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-define void @max_backedge_taken_count_by_wrapping1_nsw_nuw(i8 %N, i8* %ptr) {
+define void @max_backedge_taken_count_by_wrapping1_nsw_nuw(i8 %N, ptr %ptr) {
; CHECK-LABEL: 'max_backedge_taken_count_by_wrapping1_nsw_nuw'
; CHECK-NEXT: Determining loop execution counts for: @max_backedge_taken_count_by_wrapping1_nsw_nuw
; CHECK-NEXT: Loop %loop: backedge-taken count is (%N /u 4)
@@ -19,8 +19,8 @@ entry:
loop:
%iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr i8, i8* %ptr, i8 %iv
- store i8 %iv, i8* %gep
+ %gep = getelementptr i8, ptr %ptr, i8 %iv
+ store i8 %iv, ptr %gep
%iv.next = add nuw nsw i8 %iv, 4
%ec = icmp ne i8 %iv, %N
br i1 %ec, label %loop, label %exit
@@ -29,7 +29,7 @@ exit:
ret void
}
-define void @max_backedge_taken_count_by_wrapping1_nuw(i8 %N, i8* %ptr) {
+define void @max_backedge_taken_count_by_wrapping1_nuw(i8 %N, ptr %ptr) {
; CHECK-LABEL: 'max_backedge_taken_count_by_wrapping1_nuw'
; CHECK-NEXT: Determining loop execution counts for: @max_backedge_taken_count_by_wrapping1_nuw
; CHECK-NEXT: Loop %loop: backedge-taken count is (%N /u 4)
@@ -44,8 +44,8 @@ entry:
loop:
%iv = phi i8 [ 0, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr i8, i8* %ptr, i8 %iv
- store i8 %iv, i8* %gep
+ %gep = getelementptr i8, ptr %ptr, i8 %iv
+ store i8 %iv, ptr %gep
%iv.next = add nuw i8 %iv, 4
%ec = icmp ne i8 %iv, %N
br i1 %ec, label %loop, label %exit
@@ -54,7 +54,7 @@ exit:
ret void
}
-define void @max_backedge_taken_count_by_wrapping2_nsw_nuw(i8 %N, i8* %ptr) {
+define void @max_backedge_taken_count_by_wrapping2_nsw_nuw(i8 %N, ptr %ptr) {
; CHECK-LABEL: 'max_backedge_taken_count_by_wrapping2_nsw_nuw'
; CHECK-NEXT: Determining loop execution counts for: @max_backedge_taken_count_by_wrapping2_nsw_nuw
; CHECK-NEXT: Loop %loop: backedge-taken count is ((-64 + %N) /u 4)
@@ -69,8 +69,8 @@ entry:
loop:
%iv = phi i8 [ 64, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr i8, i8* %ptr, i8 %iv
- store i8 %iv, i8* %gep
+ %gep = getelementptr i8, ptr %ptr, i8 %iv
+ store i8 %iv, ptr %gep
%iv.next = add nuw nsw i8 %iv, 4
%ec = icmp ne i8 %iv, %N
br i1 %ec, label %loop, label %exit
@@ -79,7 +79,7 @@ exit:
ret void
}
-define void @max_backedge_taken_count_by_wrapping2_nuw(i8 %N, i8* %ptr) {
+define void @max_backedge_taken_count_by_wrapping2_nuw(i8 %N, ptr %ptr) {
; CHECK-LABEL: 'max_backedge_taken_count_by_wrapping2_nuw'
; CHECK-NEXT: Determining loop execution counts for: @max_backedge_taken_count_by_wrapping2_nuw
; CHECK-NEXT: Loop %loop: backedge-taken count is ((-64 + %N) /u 4)
@@ -94,8 +94,8 @@ entry:
loop:
%iv = phi i8 [ 64, %entry ], [ %iv.next, %loop ]
- %gep = getelementptr i8, i8* %ptr, i8 %iv
- store i8 %iv, i8* %gep
+ %gep = getelementptr i8, ptr %ptr, i8 %iv
+ store i8 %iv, ptr %gep
%iv.next = add nuw i8 %iv, 4
%ec = icmp ne i8 %iv, %N
br i1 %ec, label %loop, label %exit
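
A quick hand-worked check of the (%N /u 4) and ((-64 + %N) /u 4) backedge-taken counts above, with example values that are assumptions of this note rather than values from the tests:

  ; wrapping1: %iv = 0, 4, 8, ... and the loop exits once %iv == %N
  ;   %N = 20 -> backedge taken at %iv = 0, 4, 8, 12, 16, i.e. 20 /u 4 = 5 times
  ; wrapping2: %iv starts at 64 instead of 0
  ;   %N = 96 -> backedge taken (96 - 64) /u 4 = 8 times
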
diff --git a/llvm/test/Analysis/ScalarEvolution/max-expr-cache.ll b/llvm/test/Analysis/ScalarEvolution/max-expr-cache.ll
index a37306884f617..aeb31c36f617c 100644
--- a/llvm/test/Analysis/ScalarEvolution/max-expr-cache.ll
+++ b/llvm/test/Analysis/ScalarEvolution/max-expr-cache.ll
@@ -69,8 +69,8 @@ bb53:
%tmp55 = trunc i64 %tmp54 to i32
%tmp56 = shl nsw i32 %tmp55, 3
%tmp57 = sext i32 %tmp56 to i64
- %tmp58 = getelementptr inbounds i8, i8* null, i64 %tmp57
- store i8 undef, i8* %tmp58, align 8
+ %tmp58 = getelementptr inbounds i8, ptr null, i64 %tmp57
+ store i8 undef, ptr %tmp58, align 8
%tmp59 = add nsw i64 %tmp54, 1
%tmp60 = icmp eq i64 %tmp59, %tmp52
br i1 %tmp60, label %bb61, label %bb53
@@ -144,8 +144,8 @@ bb53:
%tmp55 = trunc i64 %tmp54 to i32
%tmp56 = shl nsw i32 %tmp55, 3
%tmp57 = sext i32 %tmp56 to i64
- %tmp58 = getelementptr inbounds i8, i8* null, i64 %tmp57
- store i8 undef, i8* %tmp58, align 8
+ %tmp58 = getelementptr inbounds i8, ptr null, i64 %tmp57
+ store i8 undef, ptr %tmp58, align 8
%tmp59 = add nsw i64 %tmp54, 1
%tmp60 = icmp eq i64 %tmp59, %tmp52
br i1 %tmp60, label %bb61, label %bb53
diff --git a/llvm/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll b/llvm/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll
index 50d5f4d2c726f..4376548b4c6eb 100644
--- a/llvm/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll
+++ b/llvm/test/Analysis/ScalarEvolution/max-trip-count-address-space.ll
@@ -7,7 +7,7 @@ target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-p4:64:64:64-n16:32:64"
; CHECK: {%d,+,4}<%bb>{{ U: [^ ]+ S: [^ ]+}}{{ *}} Exits: (-4 + (4 * (trunc i32 %n to i16)) + %d)
-define void @foo(i32 addrspace(1)* nocapture %d, i32 %n) nounwind {
+define void @foo(ptr addrspace(1) nocapture %d, i32 %n) nounwind {
; CHECK: @foo
entry:
%0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
@@ -21,8 +21,8 @@ bb: ; preds = %bb1, %bb.nph
%p.01 = phi i8 [ %4, %bb1 ], [ -1, %bb.nph ] ; <i8> [#uses=2]
%1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
%2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32, i32 addrspace(1)* %d, i64 %2 ; <i32*> [#uses=1]
- store i32 %1, i32 addrspace(1)* %3, align 4
+ %3 = getelementptr i32, ptr addrspace(1) %d, i64 %2 ; <ptr> [#uses=1]
+ store i32 %1, ptr addrspace(1) %3, align 4
%4 = add i8 %p.01, 1 ; <i8> [#uses=1]
%5 = add i32 %i.02, 1 ; <i32> [#uses=2]
br label %bb1
@@ -38,7 +38,7 @@ return: ; preds = %bb1.return_crit_edge, %entry
ret void
}
-define void @test(i8 addrspace(1)* %a, i32 %n) nounwind {
+define void @test(ptr addrspace(1) %a, i32 %n) nounwind {
; CHECK: @test
entry:
%cmp1 = icmp sgt i32 %n, 0
@@ -50,8 +50,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.lr.ph ]
- %arrayidx = getelementptr i8, i8 addrspace(1)* %a, i64 %indvar
- store i8 0, i8 addrspace(1)* %arrayidx, align 1
+ %arrayidx = getelementptr i8, ptr addrspace(1) %a, i64 %indvar
+ store i8 0, ptr addrspace(1) %arrayidx, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, %tmp
br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
diff --git a/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll b/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll
index e785799950d2d..eab605e08d2b2 100644
--- a/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll
+++ b/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll
@@ -4,7 +4,7 @@
; CHECK: {%d,+,4}
-define void @foo(i32* nocapture %d, i32 %n) nounwind {
+define void @foo(ptr nocapture %d, i32 %n) nounwind {
entry:
%0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
br i1 %0, label %bb.nph, label %return
@@ -17,8 +17,8 @@ bb: ; preds = %bb1, %bb.nph
%p.01 = phi i8 [ %4, %bb1 ], [ -1, %bb.nph ] ; <i8> [#uses=2]
%1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
%2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32, i32* %d, i64 %2 ; <i32*> [#uses=1]
- store i32 %1, i32* %3, align 4
+ %3 = getelementptr i32, ptr %d, i64 %2 ; <ptr> [#uses=1]
+ store i32 %1, ptr %3, align 4
%4 = add i8 %p.01, 1 ; <i8> [#uses=1]
%5 = add i32 %i.02, 1 ; <i32> [#uses=2]
br label %bb1
@@ -42,7 +42,7 @@ return: ; preds = %bb1.return_crit_edge, %entry
; CHECK: Loop %for.cond: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.cond: constant max backedge-taken count is 5
-@.str = private constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=2]
+@.str = private constant [4 x i8] c"%d\0A\00" ; <ptr> [#uses=2]
define i32 @main() nounwind {
entry:
@@ -65,13 +65,13 @@ for.inc: ; preds = %for.body
br label %for.cond
for.end: ; preds = %for.body, %for.cond
- %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %g_4.0) nounwind ; <i32> [#uses=0]
+ %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %g_4.0) nounwind ; <i32> [#uses=0]
ret i32 0
}
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
-define void @test(i8* %a, i32 %n) nounwind {
+define void @test(ptr %a, i32 %n) nounwind {
entry:
%cmp1 = icmp sgt i32 %n, 0
br i1 %cmp1, label %for.body.lr.ph, label %for.end
@@ -82,8 +82,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.lr.ph ]
- %arrayidx = getelementptr i8, i8* %a, i64 %indvar
- store i8 0, i8* %arrayidx, align 1
+ %arrayidx = getelementptr i8, ptr %a, i64 %indvar
+ store i8 0, ptr %arrayidx, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, %tmp
br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
@@ -107,7 +107,7 @@ for.end: ; preds = %for.cond.for.end_cr
define i32 @pr19799() {
entry:
- store i32 -1, i32* @a, align 4
+ store i32 -1, ptr @a, align 4
br label %for.body.i
for.body.i: ; preds = %for.cond.i, %entry
@@ -117,7 +117,7 @@ for.body.i: ; preds = %for.cond.i, %entry
br i1 %tobool.i, label %bar.exit, label %for.cond.i
for.cond.i: ; preds = %for.body.i
- store i32 %add.i.i, i32* @a, align 4
+ store i32 %add.i.i, ptr @a, align 4
%cmp.i = icmp slt i32 %storemerge1.i, 0
br i1 %cmp.i, label %for.body.i, label %bar.exit
@@ -133,7 +133,7 @@ bar.exit: ; preds = %for.cond.i, %for.bo
define i32 @pr18886() {
entry:
- store i64 -21, i64* @aa, align 8
+ store i64 -21, ptr @aa, align 8
br label %for.body
for.body:
@@ -143,7 +143,7 @@ for.body:
br i1 %tobool, label %return, label %for.cond
for.cond:
- store i64 %add, i64* @aa, align 8
+ store i64 %add, ptr @aa, align 8
%cmp = icmp slt i64 %add, 9
br i1 %cmp, label %for.body, label %return
@@ -163,7 +163,7 @@ return:
define i32 @cannot_compute_mustexit() {
entry:
- store i32 -1, i32* @a, align 4
+ store i32 -1, ptr @a, align 4
br label %for.body.i
for.body.i: ; preds = %for.cond.i, %entry
@@ -173,8 +173,8 @@ for.body.i: ; preds = %for.cond.i, %entry
br i1 %tobool.i, label %bar.exit, label %for.cond.i
for.cond.i: ; preds = %for.body.i
- store i32 %add.i.i, i32* @a, align 4
- %ld = load volatile i32, i32* @b
+ store i32 %add.i.i, ptr @a, align 4
+ %ld = load volatile i32, ptr @b
%cmp.i = icmp ne i32 %ld, 0
br i1 %cmp.i, label %for.body.i, label %bar.exit
@@ -190,7 +190,7 @@ bar.exit: ; preds = %for.cond.i, %for.bo
; CHECK: Loop %for.body.i: constant max backedge-taken count is 1
define i32 @two_mustexit() {
entry:
- store i32 -1, i32* @a, align 4
+ store i32 -1, ptr @a, align 4
br label %for.body.i
for.body.i: ; preds = %for.cond.i, %entry
@@ -200,7 +200,7 @@ for.body.i: ; preds = %for.cond.i, %entry
br i1 %tobool.i, label %bar.exit, label %for.cond.i
for.cond.i: ; preds = %for.body.i
- store i32 %add.i.i, i32* @a, align 4
+ store i32 %add.i.i, ptr @a, align 4
%cmp.i = icmp slt i32 %storemerge1.i, 3
br i1 %cmp.i, label %for.body.i, label %bar.exit
@@ -291,7 +291,7 @@ exit:
; The end bound of the loop can change between iterations, so the exact trip
; count is unknown, but SCEV can calculate the max trip count.
-define void @changing_end_bound(i32* %n_addr, i32* %addr) {
+define void @changing_end_bound(ptr %n_addr, ptr %addr) {
; CHECK-LABEL: Determining loop execution counts for: @changing_end_bound
; CHECK: Loop %loop: Unpredictable backedge-taken count.
; CHECK: Loop %loop: constant max backedge-taken count is 2147483646
@@ -301,11 +301,11 @@ entry:
loop:
%iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
%acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
- %val = load atomic i32, i32* %addr unordered, align 4
+ %val = load atomic i32, ptr %addr unordered, align 4
fence acquire
%acc.next = add i32 %acc, %val
%iv.next = add nsw i32 %iv, 1
- %n = load atomic i32, i32* %n_addr unordered, align 4
+ %n = load atomic i32, ptr %n_addr unordered, align 4
%cmp = icmp slt i32 %iv.next, %n
br i1 %cmp, label %loop, label %loop.exit
@@ -316,7 +316,7 @@ loop.exit:
; Similar test as above, but unknown start value.
; Also, there's no nsw on the iv.next, but SCEV knows
; the termination condition is LT, so the IV cannot wrap.
-define void @changing_end_bound2(i32 %start, i32* %n_addr, i32* %addr) {
+define void @changing_end_bound2(i32 %start, ptr %n_addr, ptr %addr) {
; CHECK-LABEL: Determining loop execution counts for: @changing_end_bound2
; CHECK: Loop %loop: Unpredictable backedge-taken count.
; CHECK: Loop %loop: constant max backedge-taken count is -1
@@ -326,11 +326,11 @@ entry:
loop:
%iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
%acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
- %val = load atomic i32, i32* %addr unordered, align 4
+ %val = load atomic i32, ptr %addr unordered, align 4
fence acquire
%acc.next = add i32 %acc, %val
%iv.next = add i32 %iv, 1
- %n = load atomic i32, i32* %n_addr unordered, align 4
+ %n = load atomic i32, ptr %n_addr unordered, align 4
%cmp = icmp slt i32 %iv.next, %n
br i1 %cmp, label %loop, label %loop.exit
@@ -339,7 +339,7 @@ loop.exit:
}
; changing end bound and greater than one stride
-define void @changing_end_bound3(i32 %start, i32* %n_addr, i32* %addr) {
+define void @changing_end_bound3(i32 %start, ptr %n_addr, ptr %addr) {
; CHECK-LABEL: Determining loop execution counts for: @changing_end_bound3
; CHECK: Loop %loop: Unpredictable backedge-taken count.
; CHECK: Loop %loop: constant max backedge-taken count is 1073741823
@@ -349,11 +349,11 @@ entry:
loop:
%iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
%acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
- %val = load atomic i32, i32* %addr unordered, align 4
+ %val = load atomic i32, ptr %addr unordered, align 4
fence acquire
%acc.next = add i32 %acc, %val
%iv.next = add nsw i32 %iv, 4
- %n = load atomic i32, i32* %n_addr unordered, align 4
+ %n = load atomic i32, ptr %n_addr unordered, align 4
%cmp = icmp slt i32 %iv.next, %n
br i1 %cmp, label %loop, label %loop.exit
@@ -363,7 +363,7 @@ loop.exit:
; same as above test, but the IV can wrap around.
; so the max backedge taken count is unpredictable.
-define void @changing_end_bound4(i32 %start, i32* %n_addr, i32* %addr) {
+define void @changing_end_bound4(i32 %start, ptr %n_addr, ptr %addr) {
; CHECK-LABEL: Determining loop execution counts for: @changing_end_bound4
; CHECK: Loop %loop: Unpredictable backedge-taken count.
; CHECK: Loop %loop: Unpredictable constant max backedge-taken count.
@@ -373,11 +373,11 @@ entry:
loop:
%iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
%acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
- %val = load atomic i32, i32* %addr unordered, align 4
+ %val = load atomic i32, ptr %addr unordered, align 4
fence acquire
%acc.next = add i32 %acc, %val
%iv.next = add i32 %iv, 4
- %n = load atomic i32, i32* %n_addr unordered, align 4
+ %n = load atomic i32, ptr %n_addr unordered, align 4
%cmp = icmp slt i32 %iv.next, %n
br i1 %cmp, label %loop, label %loop.exit
@@ -387,7 +387,7 @@ loop.exit:
; unknown stride. Since it's not knownPositive, we do not estimate the max
; backedge taken count.
-define void @changing_end_bound5(i32 %stride, i32 %start, i32* %n_addr, i32* %addr) {
+define void @changing_end_bound5(i32 %stride, i32 %start, ptr %n_addr, ptr %addr) {
; CHECK-LABEL: Determining loop execution counts for: @changing_end_bound5
; CHECK: Loop %loop: Unpredictable backedge-taken count.
; CHECK: Loop %loop: Unpredictable constant max backedge-taken count.
@@ -397,11 +397,11 @@ entry:
loop:
%iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
%acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
- %val = load atomic i32, i32* %addr unordered, align 4
+ %val = load atomic i32, ptr %addr unordered, align 4
fence acquire
%acc.next = add i32 %acc, %val
%iv.next = add nsw i32 %iv, %stride
- %n = load atomic i32, i32* %n_addr unordered, align 4
+ %n = load atomic i32, ptr %n_addr unordered, align 4
%cmp = icmp slt i32 %iv.next, %n
br i1 %cmp, label %loop, label %loop.exit
@@ -410,7 +410,7 @@ loop.exit:
}
; negative stride value
-define void @changing_end_bound6(i32 %start, i32* %n_addr, i32* %addr) {
+define void @changing_end_bound6(i32 %start, ptr %n_addr, ptr %addr) {
; CHECK-LABEL: Determining loop execution counts for: @changing_end_bound6
; CHECK: Loop %loop: Unpredictable backedge-taken count.
; CHECK: Loop %loop: Unpredictable constant max backedge-taken count.
@@ -420,11 +420,11 @@ entry:
loop:
%iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
%acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
- %val = load atomic i32, i32* %addr unordered, align 4
+ %val = load atomic i32, ptr %addr unordered, align 4
fence acquire
%acc.next = add i32 %acc, %val
%iv.next = add nsw i32 %iv, -1
- %n = load atomic i32, i32* %n_addr unordered, align 4
+ %n = load atomic i32, ptr %n_addr unordered, align 4
%cmp = icmp slt i32 %iv.next, %n
br i1 %cmp, label %loop, label %loop.exit
@@ -433,7 +433,7 @@ loop.exit:
}
; sgt with negative stride
-define void @changing_end_bound7(i32 %start, i32* %n_addr, i32* %addr) {
+define void @changing_end_bound7(i32 %start, ptr %n_addr, ptr %addr) {
; CHECK-LABEL: Determining loop execution counts for: @changing_end_bound7
; CHECK: Loop %loop: Unpredictable backedge-taken count.
; CHECK: Loop %loop: Unpredictable constant max backedge-taken count.
@@ -443,11 +443,11 @@ entry:
loop:
%iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
%acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
- %val = load atomic i32, i32* %addr unordered, align 4
+ %val = load atomic i32, ptr %addr unordered, align 4
fence acquire
%acc.next = add i32 %acc, %val
%iv.next = add i32 %iv, -1
- %n = load atomic i32, i32* %n_addr unordered, align 4
+ %n = load atomic i32, ptr %n_addr unordered, align 4
%cmp = icmp sgt i32 %iv.next, %n
br i1 %cmp, label %loop, label %loop.exit
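
The constant max backedge-taken count of 2147483646 reported for @changing_end_bound above can be rederived by hand; this is a sketch of the reasoning, not output from the tool:

  ; %n is a signed i32 load, so %n <= 2147483647
  ; the backedge is only taken while %iv.next <s %n, i.e. %iv.next <= 2147483646
  ; %iv.next = 1, 2, 3, ... on successive iterations, so at most 2147483646 backedges
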
diff --git a/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll b/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll
index a9615aff02444..d65da6fc7e5ac 100644
--- a/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll
+++ b/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll
@@ -10,13 +10,13 @@
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++) {
-; A[max(0, i - 3)] = A[min(N, i + 3)] * 2;
+; A[max(0, i - 3)] = Aptr 2;
; }
; }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* %A, i32 %N) {
+define void @f(ptr %A, i32 %N) {
; CHECK-LABEL: 'f'
; CHECK-NEXT: Classifying expressions for: @f
; CHECK-NEXT: %i.0 = phi i32 [ 0, %bb ], [ %tmp23, %bb2 ]
@@ -31,9 +31,9 @@ define void @f(i32* %A, i32 %N) {
; CHECK-NEXT: --> (sext i32 %N to i64) U: [-2147483648,2147483648) S: [-2147483648,2147483648) Exits: (sext i32 %N to i64) LoopDispositions: { %bb1: Invariant }
; CHECK-NEXT: %tmp9 = select i1 %tmp4, i64 %tmp5, i64 %tmp6
; CHECK-NEXT: --> ((sext i32 {3,+,1}<nuw><%bb1> to i64) smin (sext i32 %N to i64)) U: [-2147483648,2147483648) S: [-2147483648,2147483648) Exits: ((sext i32 (3 + (0 smax %N))<nuw> to i64) smin (sext i32 %N to i64)) LoopDispositions: { %bb1: Computable }
-; CHECK-NEXT: %tmp11 = getelementptr inbounds i32, i32* %A, i64 %tmp9
+; CHECK-NEXT: %tmp11 = getelementptr inbounds i32, ptr %A, i64 %tmp9
; CHECK-NEXT: --> ((4 * ((sext i32 {3,+,1}<nuw><%bb1> to i64) smin (sext i32 %N to i64)))<nsw> + %A) U: full-set S: full-set Exits: ((4 * ((sext i32 (3 + (0 smax %N))<nuw> to i64) smin (sext i32 %N to i64)))<nsw> + %A) LoopDispositions: { %bb1: Computable }
-; CHECK-NEXT: %tmp12 = load i32, i32* %tmp11, align 4
+; CHECK-NEXT: %tmp12 = load i32, ptr %tmp11, align 4
; CHECK-NEXT: --> %tmp12 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb1: Variant }
; CHECK-NEXT: %tmp13 = shl nsw i32 %tmp12, 1
; CHECK-NEXT: --> (2 * %tmp12) U: [0,-1) S: [-2147483648,2147483647) Exits: <<Unknown>> LoopDispositions: { %bb1: Variant }
@@ -41,7 +41,7 @@ define void @f(i32* %A, i32 %N) {
; CHECK-NEXT: --> {-3,+,1}<nsw><%bb1> U: [-3,2147483645) S: [-3,2147483645) Exits: (-3 + (zext i32 (0 smax %N) to i64))<nsw> LoopDispositions: { %bb1: Computable }
; CHECK-NEXT: %tmp19 = select i1 %tmp14, i64 0, i64 %tmp17
; CHECK-NEXT: --> (-3 + (3 smax {0,+,1}<nuw><nsw><%bb1>))<nsw> U: [0,2147483645) S: [0,2147483645) Exits: (-3 + (3 smax (zext i32 (0 smax %N) to i64)))<nsw> LoopDispositions: { %bb1: Computable }
-; CHECK-NEXT: %tmp21 = getelementptr inbounds i32, i32* %A, i64 %tmp19
+; CHECK-NEXT: %tmp21 = getelementptr inbounds i32, ptr %A, i64 %tmp19
; CHECK-NEXT: --> (-12 + (4 * (3 smax {0,+,1}<nuw><nsw><%bb1>))<nuw><nsw> + %A) U: full-set S: full-set Exits: (-12 + (4 * (3 smax (zext i32 (0 smax %N) to i64)))<nuw><nsw> + %A) LoopDispositions: { %bb1: Computable }
; CHECK-NEXT: %tmp23 = add nuw nsw i32 %i.0, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%bb1> U: [1,-2147483647) S: [1,-2147483647) Exits: (1 + (0 smax %N))<nuw> LoopDispositions: { %bb1: Computable }
@@ -69,15 +69,15 @@ bb2: ; preds = %bb1
%tmp6 = sext i32 %N to i64
%tmp9 = select i1 %tmp4, i64 %tmp5, i64 %tmp6
; min(N, i+3)
- %tmp11 = getelementptr inbounds i32, i32* %A, i64 %tmp9
- %tmp12 = load i32, i32* %tmp11, align 4
+ %tmp11 = getelementptr inbounds i32, ptr %A, i64 %tmp9
+ %tmp12 = load i32, ptr %tmp11, align 4
%tmp13 = shl nsw i32 %tmp12, 1
%tmp14 = icmp sge i32 3, %i.0
%tmp17 = add nsw i64 %i.0.1, -3
%tmp19 = select i1 %tmp14, i64 0, i64 %tmp17
; max(0, i - 3)
- %tmp21 = getelementptr inbounds i32, i32* %A, i64 %tmp19
- store i32 %tmp13, i32* %tmp21, align 4
+ %tmp21 = getelementptr inbounds i32, ptr %A, i64 %tmp19
+ store i32 %tmp13, ptr %tmp21, align 4
%tmp23 = add nuw nsw i32 %i.0, 1
br label %bb1
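
The smin/smax expressions in the CHECK lines above come from SCEV recognizing a select whose icmp condition compares the select's own operands. A minimal sketch with a hypothetical @smin_from_select function that is not part of this test file:

  define i64 @smin_from_select(i64 %a, i64 %b) {
    ; SCEV models this select as (%a smin %b); swapping the select operands
    ; or using sgt instead of slt would give (%a smax %b)
    %cmp = icmp slt i64 %a, %b
    %min = select i1 %cmp, i64 %a, i64 %b
    ret i64 %min
  }
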
diff --git a/llvm/test/Analysis/ScalarEvolution/ne-overflow.ll b/llvm/test/Analysis/ScalarEvolution/ne-overflow.ll
index fb69e9889d265..cca56bcd6c3b8 100644
--- a/llvm/test/Analysis/ScalarEvolution/ne-overflow.ll
+++ b/llvm/test/Analysis/ScalarEvolution/ne-overflow.ll
@@ -70,7 +70,7 @@ entry:
for.body:
%iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i32 %iv, 2
- store volatile i32 0, i32* @G
+ store volatile i32 0, ptr @G
%cmp = icmp ne i32 %iv.next, %N
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -92,7 +92,7 @@ entry:
for.body:
%iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i32 %iv, 2
- %val = load volatile i32, i32* @G
+ %val = load volatile i32, ptr @G
%cmp = icmp ne i32 %iv.next, %N
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -183,7 +183,7 @@ entry:
for.body:
%iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i32 %iv, 2
- %N = load i32, i32* @G
+ %N = load i32, ptr @G
%cmp = icmp ne i32 %iv.next, %N
br i1 %cmp, label %for.body, label %for.cond.cleanup
diff --git a/llvm/test/Analysis/ScalarEvolution/no-wrap-symbolic-becount.ll b/llvm/test/Analysis/ScalarEvolution/no-wrap-symbolic-becount.ll
index 5b3b254c4b685..b44be462067cc 100644
--- a/llvm/test/Analysis/ScalarEvolution/no-wrap-symbolic-becount.ll
+++ b/llvm/test/Analysis/ScalarEvolution/no-wrap-symbolic-becount.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -disable-output -scalar-evolution-use-expensive-range-sharpening -passes='print<scalar-evolution>' 2>&1 | FileCheck %s
-define i32 @test_01(i32 %start, i32* %p, i32* %q) {
+define i32 @test_01(i32 %start, ptr %p, ptr %q) {
; CHECK-LABEL: 'test_01'
; CHECK-NEXT: Classifying expressions for: @test_01
; CHECK-NEXT: %0 = zext i32 %start to i64
@@ -14,11 +14,11 @@ define i32 @test_01(i32 %start, i32* %p, i32* %q) {
; CHECK-NEXT: --> {(-1 + %start),+,-1}<%loop> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %index = zext i32 %iv.next to i64
; CHECK-NEXT: --> (zext i32 {(-1 + %start),+,-1}<%loop> to i64) U: [0,4294967296) S: [0,4294967296) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %store.addr = getelementptr i32, i32* %p, i64 %index
+; CHECK-NEXT: %store.addr = getelementptr i32, ptr %p, i64 %index
; CHECK-NEXT: --> ((4 * (zext i32 {(-1 + %start),+,-1}<%loop> to i64))<nuw><nsw> + %p) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %load.addr = getelementptr i32, i32* %q, i64 %index
+; CHECK-NEXT: %load.addr = getelementptr i32, ptr %q, i64 %index
; CHECK-NEXT: --> ((4 * (zext i32 {(-1 + %start),+,-1}<%loop> to i64))<nuw><nsw> + %q) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %stop = load i32, i32* %load.addr, align 4
+; CHECK-NEXT: %stop = load i32, ptr %load.addr, align 4
; CHECK-NEXT: --> %stop U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
; CHECK-NEXT: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: --> {(-1 + (zext i32 %start to i64))<nsw>,+,-1}<nsw><%loop> U: [-4294967296,4294967295) S: [-1,4294967295) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
@@ -45,10 +45,10 @@ loop: ; preds = %backedge, %entry
backedge: ; preds = %loop
%iv.next = add i32 %iv, -1
%index = zext i32 %iv.next to i64
- %store.addr = getelementptr i32, i32* %p, i64 %index
- store i32 1, i32* %store.addr, align 4
- %load.addr = getelementptr i32, i32* %q, i64 %index
- %stop = load i32, i32* %load.addr, align 4
+ %store.addr = getelementptr i32, ptr %p, i64 %index
+ store i32 1, ptr %store.addr, align 4
+ %load.addr = getelementptr i32, ptr %q, i64 %index
+ %stop = load i32, ptr %load.addr, align 4
%loop.cond = icmp eq i32 %stop, 0
%indvars.iv.next = add nsw i64 %indvars.iv, -1
br i1 %loop.cond, label %loop, label %failure
@@ -61,7 +61,7 @@ failure: ; preds = %backedge
}
; Check that we do not mess up with wrapping ranges.
-define i32 @test_02(i32 %start, i32* %p, i32* %q) {
+define i32 @test_02(i32 %start, ptr %p, ptr %q) {
; CHECK-LABEL: 'test_02'
; CHECK-NEXT: Classifying expressions for: @test_02
; CHECK-NEXT: %zext = zext i32 %start to i64
@@ -95,14 +95,14 @@ exit: ; preds = %loop
ret i32 0
}
-define void @pointer_iv_nowrap(i8* %startptr, i8* %endptr) local_unnamed_addr {
+define void @pointer_iv_nowrap(ptr %startptr, ptr %endptr) local_unnamed_addr {
; CHECK-LABEL: 'pointer_iv_nowrap'
; CHECK-NEXT: Classifying expressions for: @pointer_iv_nowrap
-; CHECK-NEXT: %init = getelementptr inbounds i8, i8* %startptr, i64 2000
+; CHECK-NEXT: %init = getelementptr inbounds i8, ptr %startptr, i64 2000
; CHECK-NEXT: --> (2000 + %startptr) U: full-set S: full-set
-; CHECK-NEXT: %iv = phi i8* [ %init, %entry ], [ %iv.next, %loop ]
+; CHECK-NEXT: %iv = phi ptr [ %init, %entry ], [ %iv.next, %loop ]
; CHECK-NEXT: --> {(2000 + %startptr),+,1}<nuw><%loop> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %iv.next = getelementptr inbounds i8, i8* %iv, i64 1
+; CHECK-NEXT: %iv.next = getelementptr inbounds i8, ptr %iv, i64 1
; CHECK-NEXT: --> {(2001 + %startptr),+,1}<nuw><%loop> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
; CHECK-NEXT: Determining loop execution counts for: @pointer_iv_nowrap
; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count.
@@ -111,45 +111,45 @@ define void @pointer_iv_nowrap(i8* %startptr, i8* %endptr) local_unnamed_addr {
; CHECK-NEXT: Loop %loop: Unpredictable predicated backedge-taken count.
;
entry:
- %init = getelementptr inbounds i8, i8* %startptr, i64 2000
+ %init = getelementptr inbounds i8, ptr %startptr, i64 2000
br label %loop
loop:
- %iv = phi i8* [ %init, %entry ], [ %iv.next, %loop ]
- %iv.next = getelementptr inbounds i8, i8* %iv, i64 1
- %ec = icmp ugt i8* %iv.next, %endptr
+ %iv = phi ptr [ %init, %entry ], [ %iv.next, %loop ]
+ %iv.next = getelementptr inbounds i8, ptr %iv, i64 1
+ %ec = icmp ugt ptr %iv.next, %endptr
br i1 %ec, label %end, label %loop
end:
ret void
}
-define void @pointer_iv_nowrap_guard(i32* %startptr, i32* %endptr) local_unnamed_addr {
+define void @pointer_iv_nowrap_guard(ptr %startptr, ptr %endptr) local_unnamed_addr {
; CHECK-LABEL: 'pointer_iv_nowrap_guard'
; CHECK-NEXT: Classifying expressions for: @pointer_iv_nowrap_guard
-; CHECK-NEXT: %init = getelementptr inbounds i32, i32* %startptr, i64 2000
+; CHECK-NEXT: %init = getelementptr inbounds i32, ptr %startptr, i64 2000
; CHECK-NEXT: --> (8000 + %startptr)<nuw> U: [8000,0) S: [8000,0)
-; CHECK-NEXT: %iv = phi i32* [ %init, %entry ], [ %iv.next, %loop ]
-; CHECK-NEXT: --> {(8000 + %startptr)<nuw>,+,4}<nuw><%loop> U: [8000,0) S: [8000,0) Exits: (8000 + (4 * ((-8001 + (-1 * (ptrtoint i32* %startptr to i64)) + ((8004 + (ptrtoint i32* %startptr to i64)) umax (ptrtoint i32* %endptr to i64))) /u 4))<nuw> + %startptr) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %iv.next = getelementptr inbounds i32, i32* %iv, i64 1
-; CHECK-NEXT: --> {(8004 + %startptr),+,4}<nuw><%loop> U: full-set S: full-set Exits: (8004 + (4 * ((-8001 + (-1 * (ptrtoint i32* %startptr to i64)) + ((8004 + (ptrtoint i32* %startptr to i64)) umax (ptrtoint i32* %endptr to i64))) /u 4))<nuw> + %startptr) LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: %iv = phi ptr [ %init, %entry ], [ %iv.next, %loop ]
+; CHECK-NEXT: --> {(8000 + %startptr)<nuw>,+,4}<nuw><%loop> U: [8000,0) S: [8000,0) Exits: (8000 + (4 * ((-8001 + (-1 * (ptrtoint ptr %startptr to i64)) + ((8004 + (ptrtoint ptr %startptr to i64)) umax (ptrtoint ptr %endptr to i64))) /u 4))<nuw> + %startptr) LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: %iv.next = getelementptr inbounds i32, ptr %iv, i64 1
+; CHECK-NEXT: --> {(8004 + %startptr),+,4}<nuw><%loop> U: full-set S: full-set Exits: (8004 + (4 * ((-8001 + (-1 * (ptrtoint ptr %startptr to i64)) + ((8004 + (ptrtoint ptr %startptr to i64)) umax (ptrtoint ptr %endptr to i64))) /u 4))<nuw> + %startptr) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: Determining loop execution counts for: @pointer_iv_nowrap_guard
-; CHECK-NEXT: Loop %loop: backedge-taken count is ((-8001 + (-1 * (ptrtoint i32* %startptr to i64)) + ((8004 + (ptrtoint i32* %startptr to i64)) umax (ptrtoint i32* %endptr to i64))) /u 4)
+; CHECK-NEXT: Loop %loop: backedge-taken count is ((-8001 + (-1 * (ptrtoint ptr %startptr to i64)) + ((8004 + (ptrtoint ptr %startptr to i64)) umax (ptrtoint ptr %endptr to i64))) /u 4)
; CHECK-NEXT: Loop %loop: constant max backedge-taken count is 4611686018427387903
-; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8001 + (-1 * (ptrtoint i32* %startptr to i64)) + ((8004 + (ptrtoint i32* %startptr to i64)) umax (ptrtoint i32* %endptr to i64))) /u 4)
-; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-8001 + (-1 * (ptrtoint i32* %startptr to i64)) + ((8004 + (ptrtoint i32* %startptr to i64)) umax (ptrtoint i32* %endptr to i64))) /u 4)
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8001 + (-1 * (ptrtoint ptr %startptr to i64)) + ((8004 + (ptrtoint ptr %startptr to i64)) umax (ptrtoint ptr %endptr to i64))) /u 4)
+; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-8001 + (-1 * (ptrtoint ptr %startptr to i64)) + ((8004 + (ptrtoint ptr %startptr to i64)) umax (ptrtoint ptr %endptr to i64))) /u 4)
; CHECK-NEXT: Predicates:
; CHECK: Loop %loop: Trip multiple is 1
;
entry:
- %init = getelementptr inbounds i32, i32* %startptr, i64 2000
- %cmp2 = icmp ult i32* %init, %endptr
+ %init = getelementptr inbounds i32, ptr %startptr, i64 2000
+ %cmp2 = icmp ult ptr %init, %endptr
br i1 %cmp2, label %loop, label %end
loop:
- %iv = phi i32* [ %init, %entry ], [ %iv.next, %loop ]
- %iv.next = getelementptr inbounds i32, i32* %iv, i64 1
- %ec = icmp uge i32* %iv.next, %endptr
+ %iv = phi ptr [ %init, %entry ], [ %iv.next, %loop ]
+ %iv.next = getelementptr inbounds i32, ptr %iv, i64 1
+ %ec = icmp uge ptr %iv.next, %endptr
br i1 %ec, label %end, label %loop
end:
diff --git a/llvm/test/Analysis/ScalarEvolution/no-wrap-unknown-becount.ll b/llvm/test/Analysis/ScalarEvolution/no-wrap-unknown-becount.ll
index a3698d40b6191..a46063527b7b8 100644
--- a/llvm/test/Analysis/ScalarEvolution/no-wrap-unknown-becount.ll
+++ b/llvm/test/Analysis/ScalarEvolution/no-wrap-unknown-becount.ll
@@ -3,7 +3,7 @@
declare void @llvm.experimental.guard(i1, ...)
declare void @llvm.assume(i1)
-define void @s_0(i32 %n, i1* %cond) {
+define void @s_0(i32 %n, ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @s_0
entry:
br label %loop
@@ -16,14 +16,14 @@ loop:
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%loop>
%cmp = icmp slt i32 %iv, %n
call void(i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
ret void
}
-define void @s_1(i1* %cond) {
+define void @s_1(ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @s_1
entry:
br label %loop
@@ -36,14 +36,14 @@ loop:
; CHECK-NEXT: --> {0,+,3}<nuw><nsw><%loop>
%cmp = icmp slt i32 %iv, 10000
call void(i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
ret void
}
-define void @s_2(i1* %cond) {
+define void @s_2(ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @s_2
entry:
br label %loop
@@ -56,14 +56,14 @@ loop:
; CHECK: %iv.sext = sext i32 %iv to i64
; CHECK-NEXT: --> {0,+,3}<nuw><nsw><%loop>
call void @llvm.assume(i1 %cmp)
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
ret void
}
-define void @s_3(i32 %start, i1* %cond) {
+define void @s_3(i32 %start, ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @s_3
entry:
br label %loop
@@ -78,14 +78,14 @@ be:
%iv.inc.sext = sext i32 %iv.inc to i64
; CHECK: %iv.inc.sext = sext i32 %iv.inc to i64
; CHECK-NEXT: --> {(sext i32 (3 + %start) to i64),+,3}<nsw><%loop>
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
ret void
}
-define void @s_4(i32 %start, i1* %cond) {
+define void @s_4(i32 %start, ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @s_4
entry:
br label %loop
@@ -100,14 +100,14 @@ be:
%iv.inc.sext = sext i32 %iv.inc to i64
; CHECK: %iv.inc.sext = sext i32 %iv.inc to i64
; CHECK-NEXT: --> {(sext i32 (-3 + %start) to i64),+,-3}<nsw><%loop>
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
ret void
}
-define void @u_0(i32 %n, i1* %cond) {
+define void @u_0(i32 %n, ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @u_0
entry:
br label %loop
@@ -120,14 +120,14 @@ loop:
; CHECK-NEXT: --> {0,+,1}<nuw><%loop>
%cmp = icmp ult i32 %iv, %n
call void(i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
ret void
}
-define void @u_1(i1* %cond) {
+define void @u_1(ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @u_1
entry:
br label %loop
@@ -140,14 +140,14 @@ loop:
; CHECK-NEXT: --> {0,+,3}<nuw><%loop>
%cmp = icmp ult i32 %iv, 10000
call void(i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
ret void
}
-define void @u_2(i1* %cond) {
+define void @u_2(ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @u_2
entry:
br label %loop
@@ -160,14 +160,14 @@ loop:
; CHECK: %iv.zext = zext i32 %iv to i64
; CHECK-NEXT: --> {30000,+,-2}<nw><%loop>
call void @llvm.assume(i1 %cmp)
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
ret void
}
-define void @u_3(i32 %start, i1* %cond) {
+define void @u_3(i32 %start, ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @u_3
entry:
br label %loop
@@ -182,7 +182,7 @@ be:
%iv.inc.zext = zext i32 %iv.inc to i64
; CHECK: %iv.inc.zext = zext i32 %iv.inc to i64
; CHECK-NEXT: --> {(zext i32 (3 + %start) to i64),+,3}<nuw><%loop>
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %leave
leave:
diff --git a/llvm/test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll b/llvm/test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll
index 49798028b6685..3b414f2182ed5 100644
--- a/llvm/test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll
+++ b/llvm/test/Analysis/ScalarEvolution/nowrap-preinc-limits.ll
@@ -1,6 +1,6 @@
; RUN: opt -disable-output "-passes=print<scalar-evolution>" < %s 2>&1 | FileCheck %s
-define void @f(i1* %condition) {
+define void @f(ptr %condition) {
; CHECK-LABEL: Classifying expressions for: @f
entry:
br label %loop
@@ -15,14 +15,14 @@ define void @f(i1* %condition) {
; CHECK: %idx.inc2.zext = zext i32 %idx.inc2 to i64
; CHECK-NEXT: --> {2,+,1}<nuw><%loop>
- %c = load volatile i1, i1* %condition
+ %c = load volatile i1, ptr %condition
br i1 %c, label %loop, label %exit
exit:
ret void
}
-define void @g(i1* %condition) {
+define void @g(ptr %condition) {
; CHECK-LABEL: Classifying expressions for: @g
entry:
br label %loop
@@ -36,8 +36,8 @@ define void @g(i1* %condition) {
; CHECK: %idx.inc2.sext = sext i32 %idx.inc2 to i64
; CHECK-NEXT: --> {2,+,3}<nuw><nsw><%loop>
- %cond.gep = getelementptr inbounds i1, i1* %condition, i32 %idx.inc
- %c = load volatile i1, i1* %cond.gep
+ %cond.gep = getelementptr inbounds i1, ptr %condition, i32 %idx.inc
+ %c = load volatile i1, ptr %cond.gep
br i1 %c, label %loop, label %exit
exit:
diff --git a/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll b/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
index d927e693184ee..117f249704d99 100644
--- a/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
+++ b/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
@@ -10,7 +10,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Note: Without the preheader assume, there is an 'smax' in the
; backedge-taken count expression:
-define void @foo(i32 %no, double* nocapture %d, double* nocapture %q) nounwind {
+define void @foo(i32 %no, ptr nocapture %d, ptr nocapture %q) nounwind {
; CHECK-LABEL: 'foo'
; CHECK-NEXT: Classifying expressions for: @foo
; CHECK-NEXT: %n = and i32 %no, -2
@@ -19,27 +19,27 @@ define void @foo(i32 %no, double* nocapture %d, double* nocapture %q) nounwind {
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %1 = sext i32 %i.01 to i64
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %2 = getelementptr inbounds double, double* %d, i64 %1
+; CHECK-NEXT: %2 = getelementptr inbounds double, ptr %d, i64 %1
; CHECK-NEXT: --> {%d,+,16}<nuw><%bb> U: full-set S: full-set Exits: ((16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %d) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %4 = sext i32 %i.01 to i64
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %5 = getelementptr inbounds double, double* %q, i64 %4
+; CHECK-NEXT: %5 = getelementptr inbounds double, ptr %q, i64 %4
; CHECK-NEXT: --> {%q,+,16}<nuw><%bb> U: full-set S: full-set Exits: ((16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %q) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %7 = or i32 %i.01, 1
; CHECK-NEXT: --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %8 = sext i32 %7 to i64
; CHECK-NEXT: --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %9 = getelementptr inbounds double, double* %q, i64 %8
+; CHECK-NEXT: %9 = getelementptr inbounds double, ptr %q, i64 %8
; CHECK-NEXT: --> {(8 + %q),+,16}<nuw><%bb> U: full-set S: full-set Exits: (8 + (16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %q) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %t7 = add nsw i32 %i.01, 1
; CHECK-NEXT: --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %t8 = sext i32 %t7 to i64
; CHECK-NEXT: --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %t9 = getelementptr inbounds double, double* %q, i64 %t8
+; CHECK-NEXT: %t9 = getelementptr inbounds double, ptr %q, i64 %t8
; CHECK-NEXT: --> {(8 + %q),+,16}<nuw><%bb> U: full-set S: full-set Exits: (8 + (16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %q) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %14 = sext i32 %i.01 to i64
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %15 = getelementptr inbounds double, double* %d, i64 %14
+; CHECK-NEXT: %15 = getelementptr inbounds double, ptr %d, i64 %14
; CHECK-NEXT: --> {%d,+,16}<nuw><%bb> U: full-set S: full-set Exits: ((16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %d) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %16 = add nsw i32 %i.01, 2
; CHECK-NEXT: --> {2,+,2}<nuw><nsw><%bb> U: [2,2147483647) S: [2,2147483647) Exits: (2 + (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw>) LoopDispositions: { %bb: Computable }
@@ -65,17 +65,17 @@ bb: ; preds = %bb.nph, %bb1
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %2 = getelementptr inbounds double, double* %d, i64 %1 ; <double*> [#uses=1]
+ %2 = getelementptr inbounds double, ptr %d, i64 %1 ; <ptr> [#uses=1]
- %3 = load double, double* %2, align 8 ; <double> [#uses=1]
+ %3 = load double, ptr %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %5 = getelementptr inbounds double, double* %q, i64 %4 ; <double*> [#uses=1]
- %6 = load double, double* %5, align 8 ; <double> [#uses=1]
+ %5 = getelementptr inbounds double, ptr %q, i64 %4 ; <ptr> [#uses=1]
+ %6 = load double, ptr %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]
%8 = sext i32 %7 to i64 ; <i64> [#uses=1]
- %9 = getelementptr inbounds double, double* %q, i64 %8 ; <double*> [#uses=1]
+ %9 = getelementptr inbounds double, ptr %q, i64 %8 ; <ptr> [#uses=1]
; Artificially repeat the above three instructions, this time using
; add nsw instead of or.
@@ -83,15 +83,15 @@ bb: ; preds = %bb.nph, %bb1
%t8 = sext i32 %t7 to i64 ; <i64> [#uses=1]
- %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
+ %t9 = getelementptr inbounds double, ptr %q, i64 %t8 ; <ptr> [#uses=1]
- %10 = load double, double* %9, align 8 ; <double> [#uses=1]
+ %10 = load double, ptr %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
%14 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %15 = getelementptr inbounds double, double* %d, i64 %14 ; <double*> [#uses=1]
- store double %13, double* %15, align 8
+ %15 = getelementptr inbounds double, ptr %d, i64 %14 ; <ptr> [#uses=1]
+ store double %13, ptr %15, align 8
%16 = add nsw i32 %i.01, 2 ; <i32> [#uses=2]
br label %bb1
diff --git a/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll b/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll
index 2c4eba8ac218f..4e9e91d294e20 100644
--- a/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll
+++ b/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll
@@ -7,7 +7,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-define void @foo(i32 %no, double* nocapture %d, double* nocapture %q) nounwind {
+define void @foo(i32 %no, ptr nocapture %d, ptr nocapture %q) nounwind {
; CHECK-LABEL: 'foo'
; CHECK-NEXT: Classifying expressions for: @foo
; CHECK-NEXT: %n = and i32 %no, -2
@@ -16,27 +16,27 @@ define void @foo(i32 %no, double* nocapture %d, double* nocapture %q) nounwind {
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %1 = sext i32 %i.01 to i64
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %2 = getelementptr inbounds double, double* %d, i64 %1
+; CHECK-NEXT: %2 = getelementptr inbounds double, ptr %d, i64 %1
; CHECK-NEXT: --> {%d,+,16}<nuw><%bb> U: full-set S: full-set Exits: ((16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %d) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %4 = sext i32 %i.01 to i64
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %5 = getelementptr inbounds double, double* %q, i64 %4
+; CHECK-NEXT: %5 = getelementptr inbounds double, ptr %q, i64 %4
; CHECK-NEXT: --> {%q,+,16}<nuw><%bb> U: full-set S: full-set Exits: ((16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %q) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %7 = or i32 %i.01, 1
; CHECK-NEXT: --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %8 = sext i32 %7 to i64
; CHECK-NEXT: --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %9 = getelementptr inbounds double, double* %q, i64 %8
+; CHECK-NEXT: %9 = getelementptr inbounds double, ptr %q, i64 %8
; CHECK-NEXT: --> {(8 + %q),+,16}<nuw><%bb> U: full-set S: full-set Exits: (8 + (16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %q) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %t7 = add nsw i32 %i.01, 1
; CHECK-NEXT: --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %t8 = sext i32 %t7 to i64
; CHECK-NEXT: --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %t9 = getelementptr inbounds double, double* %q, i64 %t8
+; CHECK-NEXT: %t9 = getelementptr inbounds double, ptr %q, i64 %t8
; CHECK-NEXT: --> {(8 + %q),+,16}<nuw><%bb> U: full-set S: full-set Exits: (8 + (16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %q) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %14 = sext i32 %i.01 to i64
; CHECK-NEXT: --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %15 = getelementptr inbounds double, double* %d, i64 %14
+; CHECK-NEXT: %15 = getelementptr inbounds double, ptr %d, i64 %14
; CHECK-NEXT: --> {%d,+,16}<nuw><%bb> U: full-set S: full-set Exits: ((16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %d) LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %16 = add nsw i32 %i.01, 2
; CHECK-NEXT: --> {2,+,2}<nuw><nsw><%bb> U: [2,2147483647) S: [2,2147483647) Exits: (2 + (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw>) LoopDispositions: { %bb: Computable }
@@ -61,17 +61,17 @@ bb: ; preds = %bb.nph, %bb1
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %2 = getelementptr inbounds double, double* %d, i64 %1 ; <double*> [#uses=1]
+ %2 = getelementptr inbounds double, ptr %d, i64 %1 ; <ptr> [#uses=1]
- %3 = load double, double* %2, align 8 ; <double> [#uses=1]
+ %3 = load double, ptr %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %5 = getelementptr inbounds double, double* %q, i64 %4 ; <double*> [#uses=1]
- %6 = load double, double* %5, align 8 ; <double> [#uses=1]
+ %5 = getelementptr inbounds double, ptr %q, i64 %4 ; <ptr> [#uses=1]
+ %6 = load double, ptr %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]
%8 = sext i32 %7 to i64 ; <i64> [#uses=1]
- %9 = getelementptr inbounds double, double* %q, i64 %8 ; <double*> [#uses=1]
+ %9 = getelementptr inbounds double, ptr %q, i64 %8 ; <ptr> [#uses=1]
; Artificially repeat the above three instructions, this time using
; add nsw instead of or.
@@ -79,15 +79,15 @@ bb: ; preds = %bb.nph, %bb1
%t8 = sext i32 %t7 to i64 ; <i64> [#uses=1]
- %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
+ %t9 = getelementptr inbounds double, ptr %q, i64 %t8 ; <ptr> [#uses=1]
- %10 = load double, double* %9, align 8 ; <double> [#uses=1]
+ %10 = load double, ptr %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
%14 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %15 = getelementptr inbounds double, double* %d, i64 %14 ; <double*> [#uses=1]
- store double %13, double* %15, align 8
+ %15 = getelementptr inbounds double, ptr %d, i64 %14 ; <ptr> [#uses=1]
+ store double %13, ptr %15, align 8
%16 = add nsw i32 %i.01, 2 ; <i32> [#uses=2]
br label %bb1
diff --git a/llvm/test/Analysis/ScalarEvolution/nsw.ll b/llvm/test/Analysis/ScalarEvolution/nsw.ll
index ff5282d466b93..e21f951aca59d 100644
--- a/llvm/test/Analysis/ScalarEvolution/nsw.ll
+++ b/llvm/test/Analysis/ScalarEvolution/nsw.ll
@@ -5,26 +5,26 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
-define void @test1(double* %p) nounwind {
+define void @test1(ptr %p) nounwind {
; CHECK-LABEL: 'test1'
; CHECK-NEXT: Classifying expressions for: @test1
; CHECK-NEXT: %i.01 = phi i32 [ %tmp8, %bb1 ], [ 0, %bb.nph ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb> U: [0,-2147483648) S: [0,-2147483648) Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %tmp2 = sext i32 %i.01 to i64
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb> U: [0,-9223372036854775808) S: [0,-9223372036854775808) Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %tmp3 = getelementptr double, double* %p, i64 %tmp2
+; CHECK-NEXT: %tmp3 = getelementptr double, ptr %p, i64 %tmp2
; CHECK-NEXT: --> {%p,+,8}<%bb> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %tmp6 = sext i32 %i.01 to i64
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb> U: [0,-9223372036854775808) S: [0,-9223372036854775808) Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %tmp7 = getelementptr double, double* %p, i64 %tmp6
+; CHECK-NEXT: %tmp7 = getelementptr double, ptr %p, i64 %tmp6
; CHECK-NEXT: --> {%p,+,8}<%bb> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %tmp8 = add nsw i32 %i.01, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%bb> U: [1,-2147483648) S: [1,-2147483648) Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %p.gep = getelementptr double, double* %p, i32 %tmp8
+; CHECK-NEXT: %p.gep = getelementptr double, ptr %p, i32 %tmp8
; CHECK-NEXT: --> {(8 + %p),+,8}<%bb> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: %phitmp = sext i32 %tmp8 to i64
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%bb> U: [1,-9223372036854775808) S: [1,-9223372036854775808) Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
-; CHECK-NEXT: %tmp9 = getelementptr inbounds double, double* %p, i64 %phitmp
+; CHECK-NEXT: %tmp9 = getelementptr inbounds double, ptr %p, i64 %phitmp
; CHECK-NEXT: --> {(8 + %p),+,8}<%bb> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test1
; CHECK-NEXT: Loop %bb: Unpredictable backedge-taken count.
@@ -33,7 +33,7 @@ define void @test1(double* %p) nounwind {
; CHECK-NEXT: Loop %bb: Unpredictable predicated backedge-taken count.
;
entry:
- %tmp = load double, double* %p, align 8 ; <double> [#uses=1]
+ %tmp = load double, ptr %p, align 8 ; <double> [#uses=1]
%tmp1 = fcmp ogt double %tmp, 2.000000e+00 ; <i1> [#uses=1]
br i1 %tmp1, label %bb.nph, label %return
@@ -43,21 +43,21 @@ bb.nph: ; preds = %entry
bb: ; preds = %bb1, %bb.nph
%i.01 = phi i32 [ %tmp8, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=3]
%tmp2 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %tmp3 = getelementptr double, double* %p, i64 %tmp2 ; <double*> [#uses=1]
- %tmp4 = load double, double* %tmp3, align 8 ; <double> [#uses=1]
+ %tmp3 = getelementptr double, ptr %p, i64 %tmp2 ; <ptr> [#uses=1]
+ %tmp4 = load double, ptr %tmp3, align 8 ; <double> [#uses=1]
%tmp5 = fmul double %tmp4, 9.200000e+00 ; <double> [#uses=1]
%tmp6 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr double, double* %p, i64 %tmp6 ; <double*> [#uses=1]
- store double %tmp5, double* %tmp7, align 8
+ %tmp7 = getelementptr double, ptr %p, i64 %tmp6 ; <ptr> [#uses=1]
+ store double %tmp5, ptr %tmp7, align 8
%tmp8 = add nsw i32 %i.01, 1 ; <i32> [#uses=2]
- %p.gep = getelementptr double, double* %p, i32 %tmp8
- %p.val = load double, double* %p.gep
+ %p.gep = getelementptr double, ptr %p, i32 %tmp8
+ %p.val = load double, ptr %p.gep
br label %bb1
bb1: ; preds = %bb
%phitmp = sext i32 %tmp8 to i64 ; <i64> [#uses=1]
- %tmp9 = getelementptr inbounds double, double* %p, i64 %phitmp ; <double*> [#uses=1]
- %tmp10 = load double, double* %tmp9, align 8 ; <double> [#uses=1]
+ %tmp9 = getelementptr inbounds double, ptr %p, i64 %phitmp ; <ptr> [#uses=1]
+ %tmp10 = load double, ptr %tmp9, align 8 ; <double> [#uses=1]
%tmp11 = fcmp ogt double %tmp10, 2.000000e+00 ; <i1> [#uses=1]
br i1 %tmp11, label %bb, label %bb1.return_crit_edge
@@ -68,33 +68,33 @@ return: ; preds = %bb1.return_crit_edge, %entry
ret void
}
-define void @test2(i32* %begin, i32* %end) ssp {
+define void @test2(ptr %begin, ptr %end) ssp {
; CHECK-LABEL: 'test2'
; CHECK-NEXT: Classifying expressions for: @test2
-; CHECK-NEXT: %__first.addr.02.i.i = phi i32* [ %begin, %for.body.lr.ph.i.i ], [ %ptrincdec.i.i, %for.body.i.i ]
-; CHECK-NEXT: --> {%begin,+,4}<nuw><%for.body.i.i> U: full-set S: full-set Exits: ((4 * ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4))<nuw> + %begin) LoopDispositions: { %for.body.i.i: Computable }
-; CHECK-NEXT: %ptrincdec.i.i = getelementptr inbounds i32, i32* %__first.addr.02.i.i, i64 1
-; CHECK-NEXT: --> {(4 + %begin),+,4}<nuw><%for.body.i.i> U: full-set S: full-set Exits: (4 + (4 * ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4))<nuw> + %begin) LoopDispositions: { %for.body.i.i: Computable }
+; CHECK-NEXT: %__first.addr.02.i.i = phi ptr [ %begin, %for.body.lr.ph.i.i ], [ %ptrincdec.i.i, %for.body.i.i ]
+; CHECK-NEXT: --> {%begin,+,4}<nuw><%for.body.i.i> U: full-set S: full-set Exits: ((4 * ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4))<nuw> + %begin) LoopDispositions: { %for.body.i.i: Computable }
+; CHECK-NEXT: %ptrincdec.i.i = getelementptr inbounds i32, ptr %__first.addr.02.i.i, i64 1
+; CHECK-NEXT: --> {(4 + %begin),+,4}<nuw><%for.body.i.i> U: full-set S: full-set Exits: (4 + (4 * ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4))<nuw> + %begin) LoopDispositions: { %for.body.i.i: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test2
-; CHECK-NEXT: Loop %for.body.i.i: backedge-taken count is ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4)
+; CHECK-NEXT: Loop %for.body.i.i: backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4)
; CHECK-NEXT: Loop %for.body.i.i: constant max backedge-taken count is 4611686018427387903
-; CHECK-NEXT: Loop %for.body.i.i: symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4)
-; CHECK-NEXT: Loop %for.body.i.i: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4)
+; CHECK-NEXT: Loop %for.body.i.i: symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4)
+; CHECK-NEXT: Loop %for.body.i.i: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4)
; CHECK-NEXT: Predicates:
; CHECK: Loop %for.body.i.i: Trip multiple is 1
;
entry:
- %cmp1.i.i = icmp eq i32* %begin, %end
+ %cmp1.i.i = icmp eq ptr %begin, %end
br i1 %cmp1.i.i, label %_ZSt4fillIPiiEvT_S1_RKT0_.exit, label %for.body.lr.ph.i.i
for.body.lr.ph.i.i: ; preds = %entry
br label %for.body.i.i
for.body.i.i: ; preds = %for.body.i.i, %for.body.lr.ph.i.i
- %__first.addr.02.i.i = phi i32* [ %begin, %for.body.lr.ph.i.i ], [ %ptrincdec.i.i, %for.body.i.i ]
- store i32 0, i32* %__first.addr.02.i.i, align 4
- %ptrincdec.i.i = getelementptr inbounds i32, i32* %__first.addr.02.i.i, i64 1
- %cmp.i.i = icmp eq i32* %ptrincdec.i.i, %end
+ %__first.addr.02.i.i = phi ptr [ %begin, %for.body.lr.ph.i.i ], [ %ptrincdec.i.i, %for.body.i.i ]
+ store i32 0, ptr %__first.addr.02.i.i, align 4
+ %ptrincdec.i.i = getelementptr inbounds i32, ptr %__first.addr.02.i.i, i64 1
+ %cmp.i.i = icmp eq ptr %ptrincdec.i.i, %end
br i1 %cmp.i.i, label %for.cond.for.end_crit_edge.i.i, label %for.body.i.i
for.cond.for.end_crit_edge.i.i: ; preds = %for.body.i.i
@@ -105,36 +105,36 @@ _ZSt4fillIPiiEvT_S1_RKT0_.exit: ; preds = %entry, %for.cond.fo
}
; Various checks for inbounds geps.
-define void @test3(i32* %begin, i32* %end) nounwind ssp {
+define void @test3(ptr %begin, ptr %end) nounwind ssp {
; CHECK-LABEL: 'test3'
; CHECK-NEXT: Classifying expressions for: @test3
; CHECK-NEXT: %indvar.i.i = phi i64 [ %tmp, %for.body.i.i ], [ 0, %entry ]
-; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body.i.i> U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4) LoopDispositions: { %for.body.i.i: Computable }
+; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%for.body.i.i> U: [0,4611686018427387904) S: [0,4611686018427387904) Exits: ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4) LoopDispositions: { %for.body.i.i: Computable }
; CHECK-NEXT: %tmp = add nsw i64 %indvar.i.i, 1
-; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%for.body.i.i> U: [1,4611686018427387905) S: [1,4611686018427387905) Exits: (1 + ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4))<nuw><nsw> LoopDispositions: { %for.body.i.i: Computable }
-; CHECK-NEXT: %ptrincdec.i.i = getelementptr inbounds i32, i32* %begin, i64 %tmp
-; CHECK-NEXT: --> {(4 + %begin),+,4}<nuw><%for.body.i.i> U: full-set S: full-set Exits: (4 + (4 * ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4))<nuw> + %begin) LoopDispositions: { %for.body.i.i: Computable }
-; CHECK-NEXT: %__first.addr.08.i.i = getelementptr inbounds i32, i32* %begin, i64 %indvar.i.i
-; CHECK-NEXT: --> {%begin,+,4}<nuw><%for.body.i.i> U: full-set S: full-set Exits: ((4 * ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4))<nuw> + %begin) LoopDispositions: { %for.body.i.i: Computable }
+; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%for.body.i.i> U: [1,4611686018427387905) S: [1,4611686018427387905) Exits: (1 + ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4))<nuw><nsw> LoopDispositions: { %for.body.i.i: Computable }
+; CHECK-NEXT: %ptrincdec.i.i = getelementptr inbounds i32, ptr %begin, i64 %tmp
+; CHECK-NEXT: --> {(4 + %begin),+,4}<nuw><%for.body.i.i> U: full-set S: full-set Exits: (4 + (4 * ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4))<nuw> + %begin) LoopDispositions: { %for.body.i.i: Computable }
+; CHECK-NEXT: %__first.addr.08.i.i = getelementptr inbounds i32, ptr %begin, i64 %indvar.i.i
+; CHECK-NEXT: --> {%begin,+,4}<nuw><%for.body.i.i> U: full-set S: full-set Exits: ((4 * ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4))<nuw> + %begin) LoopDispositions: { %for.body.i.i: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test3
-; CHECK-NEXT: Loop %for.body.i.i: backedge-taken count is ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4)
+; CHECK-NEXT: Loop %for.body.i.i: backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4)
; CHECK-NEXT: Loop %for.body.i.i: constant max backedge-taken count is 4611686018427387903
-; CHECK-NEXT: Loop %for.body.i.i: symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4)
-; CHECK-NEXT: Loop %for.body.i.i: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint i32* %begin to i64)) + (ptrtoint i32* %end to i64)) /u 4)
+; CHECK-NEXT: Loop %for.body.i.i: symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4)
+; CHECK-NEXT: Loop %for.body.i.i: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %begin to i64)) + (ptrtoint ptr %end to i64)) /u 4)
; CHECK-NEXT: Predicates:
; CHECK: Loop %for.body.i.i: Trip multiple is 1
;
entry:
- %cmp7.i.i = icmp eq i32* %begin, %end
+ %cmp7.i.i = icmp eq ptr %begin, %end
br i1 %cmp7.i.i, label %_ZSt4fillIPiiEvT_S1_RKT0_.exit, label %for.body.i.i
for.body.i.i: ; preds = %entry, %for.body.i.i
%indvar.i.i = phi i64 [ %tmp, %for.body.i.i ], [ 0, %entry ]
%tmp = add nsw i64 %indvar.i.i, 1
- %ptrincdec.i.i = getelementptr inbounds i32, i32* %begin, i64 %tmp
- %__first.addr.08.i.i = getelementptr inbounds i32, i32* %begin, i64 %indvar.i.i
- store i32 0, i32* %__first.addr.08.i.i, align 4
- %cmp.i.i = icmp eq i32* %ptrincdec.i.i, %end
+ %ptrincdec.i.i = getelementptr inbounds i32, ptr %begin, i64 %tmp
+ %__first.addr.08.i.i = getelementptr inbounds i32, ptr %begin, i64 %indvar.i.i
+ store i32 0, ptr %__first.addr.08.i.i, align 4
+ %cmp.i.i = icmp eq ptr %ptrincdec.i.i, %end
br i1 %cmp.i.i, label %_ZSt4fillIPiiEvT_S1_RKT0_.exit, label %for.body.i.i
_ZSt4fillIPiiEvT_S1_RKT0_.exit: ; preds = %for.body.i.i, %entry
ret void
@@ -166,18 +166,18 @@ exit:
ret i32 %result
}
-define i32 @PR12375(i32* readnone %arg) {
+define i32 @PR12375(ptr readnone %arg) {
; CHECK-LABEL: 'PR12375'
; CHECK-NEXT: Classifying expressions for: @PR12375
-; CHECK-NEXT: %tmp = getelementptr inbounds i32, i32* %arg, i64 2
+; CHECK-NEXT: %tmp = getelementptr inbounds i32, ptr %arg, i64 2
; CHECK-NEXT: --> (8 + %arg)<nuw> U: [8,0) S: [8,0)
-; CHECK-NEXT: %tmp2 = phi i32* [ %arg, %bb ], [ %tmp5, %bb1 ]
+; CHECK-NEXT: %tmp2 = phi ptr [ %arg, %bb ], [ %tmp5, %bb1 ]
; CHECK-NEXT: --> {%arg,+,4}<nuw><%bb1> U: full-set S: full-set Exits: (4 + %arg)<nuw> LoopDispositions: { %bb1: Computable }
; CHECK-NEXT: %tmp3 = phi i32 [ 0, %bb ], [ %tmp4, %bb1 ]
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb1> U: [0,-2147483648) S: [0,-2147483648) Exits: 1 LoopDispositions: { %bb1: Computable }
; CHECK-NEXT: %tmp4 = add nsw i32 %tmp3, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%bb1> U: [1,0) S: [1,0) Exits: 2 LoopDispositions: { %bb1: Computable }
-; CHECK-NEXT: %tmp5 = getelementptr inbounds i32, i32* %tmp2, i64 1
+; CHECK-NEXT: %tmp5 = getelementptr inbounds i32, ptr %tmp2, i64 1
; CHECK-NEXT: --> {(4 + %arg)<nuw>,+,4}<nuw><%bb1> U: [4,0) S: [4,0) Exits: (8 + %arg)<nuw> LoopDispositions: { %bb1: Computable }
; CHECK-NEXT: Determining loop execution counts for: @PR12375
; CHECK-NEXT: Loop %bb1: backedge-taken count is 1
@@ -188,33 +188,33 @@ define i32 @PR12375(i32* readnone %arg) {
; CHECK: Loop %bb1: Trip multiple is 2
;
bb:
- %tmp = getelementptr inbounds i32, i32* %arg, i64 2
+ %tmp = getelementptr inbounds i32, ptr %arg, i64 2
br label %bb1
bb1: ; preds = %bb1, %bb
- %tmp2 = phi i32* [ %arg, %bb ], [ %tmp5, %bb1 ]
+ %tmp2 = phi ptr [ %arg, %bb ], [ %tmp5, %bb1 ]
%tmp3 = phi i32 [ 0, %bb ], [ %tmp4, %bb1 ]
%tmp4 = add nsw i32 %tmp3, 1
- %tmp5 = getelementptr inbounds i32, i32* %tmp2, i64 1
- %tmp6 = icmp ult i32* %tmp5, %tmp
+ %tmp5 = getelementptr inbounds i32, ptr %tmp2, i64 1
+ %tmp6 = icmp ult ptr %tmp5, %tmp
br i1 %tmp6, label %bb1, label %bb7
bb7: ; preds = %bb1
ret i32 %tmp4
}
-define void @PR12376(i32* nocapture %arg, i32* nocapture %arg1) {
+define void @PR12376(ptr nocapture %arg, ptr nocapture %arg1) {
; CHECK-LABEL: 'PR12376'
; CHECK-NEXT: Classifying expressions for: @PR12376
-; CHECK-NEXT: %tmp = phi i32* [ %arg, %bb ], [ %tmp4, %bb2 ]
-; CHECK-NEXT: --> {%arg,+,4}<nuw><%bb2> U: full-set S: full-set Exits: ((4 * ((-1 + (-1 * (ptrtoint i32* %arg to i64)) + ((4 + (ptrtoint i32* %arg to i64))<nuw> umax (ptrtoint i32* %arg1 to i64))) /u 4))<nuw> + %arg) LoopDispositions: { %bb2: Computable }
-; CHECK-NEXT: %tmp4 = getelementptr inbounds i32, i32* %tmp, i64 1
-; CHECK-NEXT: --> {(4 + %arg)<nuw>,+,4}<nuw><%bb2> U: [4,0) S: [4,0) Exits: (4 + (4 * ((-1 + (-1 * (ptrtoint i32* %arg to i64)) + ((4 + (ptrtoint i32* %arg to i64))<nuw> umax (ptrtoint i32* %arg1 to i64))) /u 4))<nuw> + %arg) LoopDispositions: { %bb2: Computable }
+; CHECK-NEXT: %tmp = phi ptr [ %arg, %bb ], [ %tmp4, %bb2 ]
+; CHECK-NEXT: --> {%arg,+,4}<nuw><%bb2> U: full-set S: full-set Exits: ((4 * ((-1 + (-1 * (ptrtoint ptr %arg to i64)) + ((4 + (ptrtoint ptr %arg to i64))<nuw> umax (ptrtoint ptr %arg1 to i64))) /u 4))<nuw> + %arg) LoopDispositions: { %bb2: Computable }
+; CHECK-NEXT: %tmp4 = getelementptr inbounds i32, ptr %tmp, i64 1
+; CHECK-NEXT: --> {(4 + %arg)<nuw>,+,4}<nuw><%bb2> U: [4,0) S: [4,0) Exits: (4 + (4 * ((-1 + (-1 * (ptrtoint ptr %arg to i64)) + ((4 + (ptrtoint ptr %arg to i64))<nuw> umax (ptrtoint ptr %arg1 to i64))) /u 4))<nuw> + %arg) LoopDispositions: { %bb2: Computable }
; CHECK-NEXT: Determining loop execution counts for: @PR12376
-; CHECK-NEXT: Loop %bb2: backedge-taken count is ((-1 + (-1 * (ptrtoint i32* %arg to i64)) + ((4 + (ptrtoint i32* %arg to i64))<nuw> umax (ptrtoint i32* %arg1 to i64))) /u 4)
+; CHECK-NEXT: Loop %bb2: backedge-taken count is ((-1 + (-1 * (ptrtoint ptr %arg to i64)) + ((4 + (ptrtoint ptr %arg to i64))<nuw> umax (ptrtoint ptr %arg1 to i64))) /u 4)
; CHECK-NEXT: Loop %bb2: constant max backedge-taken count is 4611686018427387902
-; CHECK-NEXT: Loop %bb2: symbolic max backedge-taken count is ((-1 + (-1 * (ptrtoint i32* %arg to i64)) + ((4 + (ptrtoint i32* %arg to i64))<nuw> umax (ptrtoint i32* %arg1 to i64))) /u 4)
-; CHECK-NEXT: Loop %bb2: Predicated backedge-taken count is ((-1 + (-1 * (ptrtoint i32* %arg to i64)) + ((4 + (ptrtoint i32* %arg to i64))<nuw> umax (ptrtoint i32* %arg1 to i64))) /u 4)
+; CHECK-NEXT: Loop %bb2: symbolic max backedge-taken count is ((-1 + (-1 * (ptrtoint ptr %arg to i64)) + ((4 + (ptrtoint ptr %arg to i64))<nuw> umax (ptrtoint ptr %arg1 to i64))) /u 4)
+; CHECK-NEXT: Loop %bb2: Predicated backedge-taken count is ((-1 + (-1 * (ptrtoint ptr %arg to i64)) + ((4 + (ptrtoint ptr %arg to i64))<nuw> umax (ptrtoint ptr %arg1 to i64))) /u 4)
; CHECK-NEXT: Predicates:
; CHECK: Loop %bb2: Trip multiple is 1
;
@@ -222,9 +222,9 @@ bb:
br label %bb2
bb2: ; preds = %bb2, %bb
- %tmp = phi i32* [ %arg, %bb ], [ %tmp4, %bb2 ]
- %tmp4 = getelementptr inbounds i32, i32* %tmp, i64 1
- %tmp3 = icmp ult i32* %tmp4, %arg1
+ %tmp = phi ptr [ %arg, %bb ], [ %tmp4, %bb2 ]
+ %tmp4 = getelementptr inbounds i32, ptr %tmp, i64 1
+ %tmp3 = icmp ult ptr %tmp4, %arg1
br i1 %tmp3, label %bb2, label %bb5
bb5: ; preds = %bb2
@@ -233,7 +233,7 @@ bb5: ; preds = %bb2
declare void @f(i32)
-define void @nswnowrap(i32 %v, i32* %buf) {
+define void @nswnowrap(i32 %v, ptr %buf) {
; CHECK-LABEL: 'nswnowrap'
; CHECK-NEXT: Classifying expressions for: @nswnowrap
; CHECK-NEXT: %add = add nsw i32 %v, 1
@@ -242,9 +242,9 @@ define void @nswnowrap(i32 %v, i32* %buf) {
; CHECK-NEXT: --> {%v,+,1}<nsw><%for.body> U: full-set S: full-set Exits: ((1 + %v) smax %v) LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: %inc = add nsw i32 %i.04, 1
; CHECK-NEXT: --> {(1 + %v)<nsw>,+,1}<nsw><%for.body> U: full-set S: full-set Exits: (1 + ((1 + %v)<nsw> smax %v)) LoopDispositions: { %for.body: Computable }
-; CHECK-NEXT: %buf.gep = getelementptr inbounds i32, i32* %buf, i32 %inc
+; CHECK-NEXT: %buf.gep = getelementptr inbounds i32, ptr %buf, i32 %inc
; CHECK-NEXT: --> {(4 + (4 * (sext i32 %v to i64))<nsw> + %buf),+,4}<nw><%for.body> U: full-set S: full-set Exits: (4 + (4 * (zext i32 ((-1 * %v) + ((1 + %v)<nsw> smax %v)) to i64))<nuw><nsw> + (4 * (sext i32 %v to i64))<nsw> + %buf) LoopDispositions: { %for.body: Computable }
-; CHECK-NEXT: %buf.val = load i32, i32* %buf.gep, align 4
+; CHECK-NEXT: %buf.val = load i32, ptr %buf.gep, align 4
; CHECK-NEXT: --> %buf.val U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %for.body: Variant }
; CHECK-NEXT: Determining loop execution counts for: @nswnowrap
; CHECK-NEXT: Loop %for.body: backedge-taken count is ((-1 * %v) + ((1 + %v)<nsw> smax %v))
@@ -261,8 +261,8 @@ entry:
for.body:
%i.04 = phi i32 [ %v, %entry ], [ %inc, %for.body ]
%inc = add nsw i32 %i.04, 1
- %buf.gep = getelementptr inbounds i32, i32* %buf, i32 %inc
- %buf.val = load i32, i32* %buf.gep
+ %buf.gep = getelementptr inbounds i32, ptr %buf, i32 %inc
+ %buf.val = load i32, ptr %buf.gep
%cmp = icmp slt i32 %i.04, %add
tail call void @f(i32 %i.04)
br i1 %cmp, label %for.body, label %for.end
@@ -283,9 +283,9 @@ define void @test4(i32 %arg) {
; CHECK-NEXT: --> {(-2 + %arg)<nsw>,+,1}<nsw><%for.body> U: full-set S: full-set Exits: (-3 + (10 smax (1 + %arg)<nsw>))<nsw> LoopDispositions: { %for.body: Computable }
; CHECK-NEXT: %idxprom = sext i32 %sub to i64
; CHECK-NEXT: --> {(-2 + (sext i32 %arg to i64))<nsw>,+,1}<nsw><%for.body> U: [-2147483650,4294967304) S: [-2147483650,4294967304) Exits: (-2 + (zext i32 (-1 + (-1 * %arg) + (10 smax (1 + %arg)<nsw>)) to i64) + (sext i32 %arg to i64)) LoopDispositions: { %for.body: Computable }
-; CHECK-NEXT: %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %array, i64 0, i64 %idxprom
+; CHECK-NEXT: %arrayidx = getelementptr inbounds [10 x i32], ptr %array, i64 0, i64 %idxprom
; CHECK-NEXT: --> {(-8 + (4 * (sext i32 %arg to i64))<nsw> + %array),+,4}<nw><%for.body> U: [0,-3) S: [-9223372036854775808,9223372036854775805) Exits: (-8 + (4 * (zext i32 (-1 + (-1 * %arg) + (10 smax (1 + %arg)<nsw>)) to i64))<nuw><nsw> + (4 * (sext i32 %arg to i64))<nsw> + %array) LoopDispositions: { %for.body: Computable }
-; CHECK-NEXT: %data = load i32, i32* %arrayidx, align 4
+; CHECK-NEXT: %data = load i32, ptr %arrayidx, align 4
; CHECK-NEXT: --> %data U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %for.body: Variant }
; CHECK-NEXT: %inc5 = add nsw i32 %index, 1
; CHECK-NEXT: --> {(1 + %arg)<nsw>,+,1}<nsw><%for.body> U: full-set S: full-set Exits: (10 smax (1 + %arg)<nsw>) LoopDispositions: { %for.body: Computable }
@@ -305,8 +305,8 @@ for.body:
%index = phi i32 [ %inc5, %for.body ], [ %arg, %entry ]
%sub = add nsw i32 %index, -2
%idxprom = sext i32 %sub to i64
- %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %array, i64 0, i64 %idxprom
- %data = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [10 x i32], ptr %array, i64 0, i64 %idxprom
+ %data = load i32, ptr %arrayidx, align 4
%inc5 = add nsw i32 %index, 1
%cmp2 = icmp slt i32 %inc5, 10
br i1 %cmp2, label %for.body, label %for.end
@@ -406,7 +406,7 @@ leave:
ret void
}
-define void @select_cond_poison_propagation(double* %p, i32 %x) nounwind {
+define void @select_cond_poison_propagation(ptr %p, i32 %x) nounwind {
; CHECK-LABEL: 'select_cond_poison_propagation'
; CHECK-NEXT: Classifying expressions for: @select_cond_poison_propagation
; CHECK-NEXT: %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ]
diff --git a/llvm/test/Analysis/ScalarEvolution/nw-sub-is-not-nw-add.ll b/llvm/test/Analysis/ScalarEvolution/nw-sub-is-not-nw-add.ll
index 3615a17b25c89..fbb552a203631 100644
--- a/llvm/test/Analysis/ScalarEvolution/nw-sub-is-not-nw-add.ll
+++ b/llvm/test/Analysis/ScalarEvolution/nw-sub-is-not-nw-add.ll
@@ -1,14 +1,14 @@
; RUN: opt -S -passes=indvars < %s | FileCheck %s
; Check that SCEV does not assume sub nuw X Y == add nuw X, -Y
-define void @f(i32* %loc) {
+define void @f(ptr %loc) {
; CHECK-LABEL: @f
entry:
br label %loop
loop:
%idx = phi i32 [ 6, %entry ], [ %idx.dec, %loop ]
- store i32 %idx, i32* %loc
+ store i32 %idx, ptr %loc
%idx.dec = sub nuw i32 %idx, 1
%cond = icmp uge i32 %idx.dec, 5
br i1 %cond, label %loop, label %exit
diff --git a/llvm/test/Analysis/ScalarEvolution/pointer-sign-bits.ll b/llvm/test/Analysis/ScalarEvolution/pointer-sign-bits.ll
index aaf96b8311baa..bfd43c82d3a07 100644
--- a/llvm/test/Analysis/ScalarEvolution/pointer-sign-bits.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pointer-sign-bits.ll
@@ -1,9 +1,9 @@
; RUN: opt < %s -passes='print<scalar-evolution>'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
- %JavaObject = type { [0 x i32 (...)*]*, i8* }
+ %JavaObject = type { ptr, ptr }
-define void @JnJVM_antlr_CSharpCodeGenerator_genBitSet__Lantlr_collections_impl_BitSet_2I(%JavaObject*, %JavaObject*, i32) {
+define void @JnJVM_antlr_CSharpCodeGenerator_genBitSet__Lantlr_collections_impl_BitSet_2I(ptr, ptr, i32) {
start:
br i1 undef, label %"stack overflow", label %"no stack overflow"
@@ -180,7 +180,7 @@ verifyNullExit80: ; preds = %"verifyNullCont78.GOTO or IF*4_crit_e
ret void
verifyNullCont81: ; preds = %"verifyNullCont78.GOTO or IF*4_crit_edge"
- %4 = ptrtoint i8* undef to i32 ; <i32> [#uses=2]
+ %4 = ptrtoint ptr undef to i32 ; <i32> [#uses=2]
%5 = icmp slt i32 0, %4 ; <i1> [#uses=1]
br i1 %5, label %verifyNullCont84, label %verifyNullCont172
diff --git a/llvm/test/Analysis/ScalarEvolution/pr18606.ll b/llvm/test/Analysis/ScalarEvolution/pr18606.ll
index 22db4099f6bd6..65fe5d5f2353a 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr18606.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr18606.ll
@@ -14,7 +14,7 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: norecurse nounwind uwtable
define i32 @main() local_unnamed_addr {
entry:
- %a.promoted4 = load i32, i32* @a, align 4
+ %a.promoted4 = load i32, ptr @a, align 4
br label %for.cond1.preheader
for.cond1.preheader: ; preds = %entry, %for.body3
@@ -61,7 +61,7 @@ for.body3: ; preds = %for.cond1.preheader
for.end6: ; preds = %for.body3
%mul.lcssa.lcssa = phi i32 [ %mul.30, %for.body3 ]
%inc.lcssa.lcssa = phi i32 [ 31, %for.body3 ]
- store i32 %mul.lcssa.lcssa, i32* @a, align 4
- store i32 %inc.lcssa.lcssa, i32* @b, align 4
+ store i32 %mul.lcssa.lcssa, ptr @a, align 4
+ store i32 %inc.lcssa.lcssa, ptr @b, align 4
ret i32 0
}
diff --git a/llvm/test/Analysis/ScalarEvolution/pr22179.ll b/llvm/test/Analysis/ScalarEvolution/pr22179.ll
index 6ac2f4f00dbb1..8a2249fb4b209 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr22179.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr22179.ll
@@ -9,12 +9,12 @@
; Function Attrs: nounwind ssp uwtable
define i32 @main() {
; CHECK-LABEL: Classifying expressions for: @main
- store i8 0, i8* getelementptr inbounds (%struct.anon, %struct.anon* @a, i64 0, i32 0), align 1
+ store i8 0, ptr @a, align 1
br label %loop
loop:
%storemerge1 = phi i8 [ 0, %0 ], [ %inc, %loop ]
- %m = load volatile i32, i32* getelementptr inbounds (%struct.S, %struct.S* @b, i64 0, i32 0), align 4
+ %m = load volatile i32, ptr @b, align 4
%inc = add nuw i8 %storemerge1, 1
; CHECK: %inc = add nuw i8 %storemerge1, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop>
@@ -23,6 +23,6 @@ loop:
br i1 %exitcond, label %exit, label %loop
exit:
- store i8 -128, i8* getelementptr inbounds (%struct.anon, %struct.anon* @a, i64 0, i32 0), align 1
+ store i8 -128, ptr @a, align 1
ret i32 0
}
diff --git a/llvm/test/Analysis/ScalarEvolution/pr22674.ll b/llvm/test/Analysis/ScalarEvolution/pr22674.ll
index 7b382d86bded3..f175b1b360230 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr22674.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr22674.ll
@@ -5,10 +5,10 @@ target datalayout = "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-linux-gnux32"
%"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506" = type { %"class.llvm::FoldingSetImpl::Node.1.1801.3600.5913.6684.7455.8226.9254.9768.10025.18505", i32 }
-%"class.llvm::FoldingSetImpl::Node.1.1801.3600.5913.6684.7455.8226.9254.9768.10025.18505" = type { i8* }
-%"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507" = type { i32, %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"* }
-%"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509" = type { %"class.llvm::AttributeImpl.2.1802.3601.5914.6685.7456.8227.9255.9769.10026.18508"* }
-%"class.llvm::AttributeImpl.2.1802.3601.5914.6685.7456.8227.9255.9769.10026.18508" = type <{ i32 (...)**, %"class.llvm::FoldingSetImpl::Node.1.1801.3600.5913.6684.7455.8226.9254.9768.10025.18505", i8, [3 x i8] }>
+%"class.llvm::FoldingSetImpl::Node.1.1801.3600.5913.6684.7455.8226.9254.9768.10025.18505" = type { ptr }
+%"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507" = type { i32, ptr }
+%"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509" = type { ptr }
+%"class.llvm::AttributeImpl.2.1802.3601.5914.6685.7456.8227.9255.9769.10026.18508" = type <{ ptr, %"class.llvm::FoldingSetImpl::Node.1.1801.3600.5913.6684.7455.8226.9254.9768.10025.18505", i8, [3 x i8] }>
; Function Attrs: nounwind uwtable
define void @_ZNK4llvm11AttrBuilder13hasAttributesENS_12AttributeSetEy() #0 align 2 {
@@ -44,22 +44,21 @@ cond.false: ; preds = %for.end, %for.inc,
unreachable
_ZNK4llvm12AttributeSet3endEj.exit: ; preds = %for.end
- %second.i.i.i = getelementptr inbounds %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507", %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507"* undef, i32 %I.099.lcssa129, i32 1
- %0 = load %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"*, %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"** %second.i.i.i, align 4, !tbaa !2
- %NumAttrs.i.i.i = getelementptr inbounds %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506", %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"* %0, i32 0, i32 1
- %1 = load i32, i32* %NumAttrs.i.i.i, align 4, !tbaa !8
- %add.ptr.i.i.i55 = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* undef, i32 %1
+ %second.i.i.i = getelementptr inbounds %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507", ptr undef, i32 %I.099.lcssa129, i32 1
+ %0 = load ptr, ptr %second.i.i.i, align 4, !tbaa !2
+ %NumAttrs.i.i.i = getelementptr inbounds %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506", ptr %0, i32 0, i32 1
+ %1 = load i32, ptr %NumAttrs.i.i.i, align 4, !tbaa !8
+ %add.ptr.i.i.i55 = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", ptr undef, i32 %1
br i1 undef, label %return, label %for.body11
for.cond9: ; preds = %_ZNK4llvm9Attribute13getKindAsEnumEv.exit
- %cmp10 = icmp eq %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %incdec.ptr, %add.ptr.i.i.i55
+ %cmp10 = icmp eq ptr %incdec.ptr, %add.ptr.i.i.i55
br i1 %cmp10, label %return, label %for.body11
for.body11: ; preds = %for.cond9, %_ZNK4llvm12AttributeSet3endEj.exit
- %I5.096 = phi %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* [ %incdec.ptr, %for.cond9 ], [ undef, %_ZNK4llvm12AttributeSet3endEj.exit ]
- %2 = bitcast %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096 to i32*
- %3 = load i32, i32* %2, align 4, !tbaa !10
- %tobool.i59 = icmp eq i32 %3, 0
+ %I5.096 = phi ptr [ %incdec.ptr, %for.cond9 ], [ undef, %_ZNK4llvm12AttributeSet3endEj.exit ]
+ %2 = load i32, ptr %I5.096, align 4, !tbaa !10
+ %tobool.i59 = icmp eq i32 %2, 0
br i1 %tobool.i59, label %cond.false21, label %_ZNK4llvm9Attribute15isEnumAttributeEv.exit
_ZNK4llvm9Attribute15isEnumAttributeEv.exit: ; preds = %for.body11
@@ -70,7 +69,7 @@ _ZNK4llvm9Attribute15isEnumAttributeEv.exit: ; preds = %for.body11
]
_ZNK4llvm9Attribute13getKindAsEnumEv.exit: ; preds = %_ZNK4llvm9Attribute15isEnumAttributeEv.exit, %_ZNK4llvm9Attribute15isEnumAttributeEv.exit
- %incdec.ptr = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096, i32 1
+ %incdec.ptr = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", ptr %I5.096, i32 1
br i1 undef, label %for.cond9, label %return
cond.false21: ; preds = %_ZNK4llvm9Attribute15isEnumAttributeEv.exit, %for.body11
diff --git a/llvm/test/Analysis/ScalarEvolution/pr24757.ll b/llvm/test/Analysis/ScalarEvolution/pr24757.ll
index 117c150a27b8b..a8e6bba863488 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr24757.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr24757.ll
@@ -12,7 +12,7 @@ declare void @use(i32)
define i32 @main() {
bb:
- %a.promoted = load i8, i8* @a
+ %a.promoted = load i8, ptr @a
br label %bb1
bb1: ; preds = %bb1, %bb
@@ -27,8 +27,8 @@ bb1: ; preds = %bb1, %bb
br i1 %tmp7, label %bb8, label %bb1
bb8: ; preds = %bb1
- store i8 %tmp2, i8* @a
- store i32 %tmp4, i32* @b
+ store i8 %tmp2, ptr @a
+ store i32 %tmp4, ptr @b
%tmp9 = sext i8 %tmp2 to i32
call void @use(i32 %tmp9)
ret i32 0
diff --git a/llvm/test/Analysis/ScalarEvolution/pr25369.ll b/llvm/test/Analysis/ScalarEvolution/pr25369.ll
index a944cf8042106..fce008bbfca1f 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr25369.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr25369.ll
@@ -18,7 +18,7 @@ bb3: ; preds = %bb4
bb4: ; preds = %bb4, %bb2, %bb
%tmp5 = phi i64 [ %tmp11, %bb4 ], [ 1, %bb2 ], [ 1, %bb ]
%tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
- %tmp7 = load i32, i32* undef, align 4
+ %tmp7 = load i32, ptr undef, align 4
%tmp8 = add i32 %tmp7, %tmp6
%tmp9 = add i32 undef, %tmp8
%tmp10 = add i32 undef, %tmp9
@@ -55,7 +55,7 @@ bb3: ; preds = %bb4
bb4: ; preds = %bb4, %bb2, %bb
%tmp5 = phi i64 [ %tmp11, %bb4 ], [ 1, %bb2 ], [ 3, %bb ]
%tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
- %tmp7 = load i32, i32* undef, align 4
+ %tmp7 = load i32, ptr undef, align 4
%tmp8 = add i32 %tmp7, %tmp6
%tmp9 = add i32 undef, %tmp8
%tmp10 = add i32 undef, %tmp9
diff --git a/llvm/test/Analysis/ScalarEvolution/pr35890.ll b/llvm/test/Analysis/ScalarEvolution/pr35890.ll
index 591e46a5b4136..cf91da4ab0bbb 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr35890.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr35890.ll
@@ -5,12 +5,12 @@ target triple = "x86_64-unknown-linux-gnu"
; Check that it does not crash because SCEVAddRec's step is not an AddRec.
-define void @pr35890(i32* %inc_ptr, i32 %a) {
+define void @pr35890(ptr %inc_ptr, i32 %a) {
; CHECK-LABEL: @pr35890(
entry:
- %inc = load i32, i32* %inc_ptr, !range !0
+ %inc = load i32, ptr %inc_ptr, !range !0
%ne.cond = icmp ne i32 %inc, 0
br i1 %ne.cond, label %loop, label %bail
diff --git a/llvm/test/Analysis/ScalarEvolution/pr3909.ll b/llvm/test/Analysis/ScalarEvolution/pr3909.ll
index 3f62d3bcfd174..af04d3b7ecc05 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr3909.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr3909.ll
@@ -2,8 +2,8 @@
; PR 3909
- %0 = type { i32, %1* } ; type %0
- %1 = type { i32, i8* } ; type %1
+ %0 = type { i32, ptr } ; type %0
+ %1 = type { i32, ptr } ; type %1
define x86_stdcallcc i32 @_Dmain(%0 %unnamed) {
entry:
diff --git a/llvm/test/Analysis/ScalarEvolution/pr46786.ll b/llvm/test/Analysis/ScalarEvolution/pr46786.ll
index 71feb7fcae0b3..89cc110a45df4 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr46786.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr46786.ll
@@ -5,128 +5,128 @@ source_filename = "input.cpp"
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
; Function Attrs: nofree
-define i8* @FSE_decompress_usingDTable(i8* %arg, i32 %arg1, i32 %arg2, i32 %arg3) local_unnamed_addr #0 {
+define ptr @FSE_decompress_usingDTable(ptr %arg, i32 %arg1, i32 %arg2, i32 %arg3) local_unnamed_addr #0 {
; CHECK-LABEL: 'FSE_decompress_usingDTable'
; CHECK-NEXT: Classifying expressions for: @FSE_decompress_usingDTable
-; CHECK-NEXT: %i = getelementptr inbounds i8, i8* %arg, i32 %arg2
+; CHECK-NEXT: %i = getelementptr inbounds i8, ptr %arg, i32 %arg2
; CHECK-NEXT: --> (%arg2 + %arg) U: full-set S: full-set
; CHECK-NEXT: %i4 = sub nsw i32 0, %arg1
; CHECK-NEXT: --> (-1 * %arg1) U: full-set S: full-set
-; CHECK-NEXT: %i5 = getelementptr inbounds i8, i8* %i, i32 %i4
+; CHECK-NEXT: %i5 = getelementptr inbounds i8, ptr %i, i32 %i4
; CHECK-NEXT: --> ((-1 * %arg1) + %arg2 + %arg) U: full-set S: full-set
; CHECK-NEXT: %i7 = select i1 %i6, i32 %arg2, i32 %arg1
-; CHECK-NEXT: --> ((-1 * (ptrtoint i8* %arg to i32)) + (((-1 * %arg1) + (ptrtoint i8* %arg to i32) + %arg2) umin (ptrtoint i8* %arg to i32)) + %arg1) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * (ptrtoint ptr %arg to i32)) + (((-1 * %arg1) + (ptrtoint ptr %arg to i32) + %arg2) umin (ptrtoint ptr %arg to i32)) + %arg1) U: full-set S: full-set
; CHECK-NEXT: %i8 = sub i32 %arg3, %i7
-; CHECK-NEXT: --> ((-1 * (((-1 * %arg1) + (ptrtoint i8* %arg to i32) + %arg2) umin (ptrtoint i8* %arg to i32))) + (-1 * %arg1) + (ptrtoint i8* %arg to i32) + %arg3) U: full-set S: full-set
-; CHECK-NEXT: %i9 = getelementptr inbounds i8, i8* %arg, i32 %i8
-; CHECK-NEXT: --> ((-1 * (((-1 * %arg1) + (ptrtoint i8* %arg to i32) + %arg2) umin (ptrtoint i8* %arg to i32))) + (-1 * %arg1) + (ptrtoint i8* %arg to i32) + %arg3 + %arg) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * (((-1 * %arg1) + (ptrtoint ptr %arg to i32) + %arg2) umin (ptrtoint ptr %arg to i32))) + (-1 * %arg1) + (ptrtoint ptr %arg to i32) + %arg3) U: full-set S: full-set
+; CHECK-NEXT: %i9 = getelementptr inbounds i8, ptr %arg, i32 %i8
+; CHECK-NEXT: --> ((-1 * (((-1 * %arg1) + (ptrtoint ptr %arg to i32) + %arg2) umin (ptrtoint ptr %arg to i32))) + (-1 * %arg1) + (ptrtoint ptr %arg to i32) + %arg3 + %arg) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @FSE_decompress_usingDTable
;
bb:
- %i = getelementptr inbounds i8, i8* %arg, i32 %arg2
+ %i = getelementptr inbounds i8, ptr %arg, i32 %arg2
%i4 = sub nsw i32 0, %arg1
- %i5 = getelementptr inbounds i8, i8* %i, i32 %i4
- %i6 = icmp ult i8* %i5, %arg
+ %i5 = getelementptr inbounds i8, ptr %i, i32 %i4
+ %i6 = icmp ult ptr %i5, %arg
%i7 = select i1 %i6, i32 %arg2, i32 %arg1
%i8 = sub i32 %arg3, %i7
- %i9 = getelementptr inbounds i8, i8* %arg, i32 %i8
- ret i8* %i9
+ %i9 = getelementptr inbounds i8, ptr %arg, i32 %i8
+ ret ptr %i9
}
-define i8* @test_01(i8* %p) {
+define ptr @test_01(ptr %p) {
; CHECK-LABEL: 'test_01'
; CHECK-NEXT: Classifying expressions for: @test_01
-; CHECK-NEXT: %p1 = getelementptr i8, i8* %p, i32 2
+; CHECK-NEXT: %p1 = getelementptr i8, ptr %p, i32 2
; CHECK-NEXT: --> (2 + %p) U: full-set S: full-set
-; CHECK-NEXT: %p2 = getelementptr i8, i8* %p, i32 1
+; CHECK-NEXT: %p2 = getelementptr i8, ptr %p, i32 1
; CHECK-NEXT: --> (1 + %p) U: full-set S: full-set
; CHECK-NEXT: %index = select i1 %cmp, i32 2, i32 1
-; CHECK-NEXT: --> ((-1 * (ptrtoint i8* %p to i32)) + ((1 + (ptrtoint i8* %p to i32)) umax (2 + (ptrtoint i8* %p to i32)))) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * (ptrtoint ptr %p to i32)) + ((1 + (ptrtoint ptr %p to i32)) umax (2 + (ptrtoint ptr %p to i32)))) U: full-set S: full-set
; CHECK-NEXT: %neg_index = sub i32 0, %index
-; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint i8* %p to i32)) umax (2 + (ptrtoint i8* %p to i32)))) + (ptrtoint i8* %p to i32)) U: full-set S: full-set
-; CHECK-NEXT: %gep = getelementptr i8, i8* %p, i32 %neg_index
-; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint i8* %p to i32)) umax (2 + (ptrtoint i8* %p to i32)))) + (ptrtoint i8* %p to i32) + %p) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint ptr %p to i32)) umax (2 + (ptrtoint ptr %p to i32)))) + (ptrtoint ptr %p to i32)) U: full-set S: full-set
+; CHECK-NEXT: %gep = getelementptr i8, ptr %p, i32 %neg_index
+; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint ptr %p to i32)) umax (2 + (ptrtoint ptr %p to i32)))) + (ptrtoint ptr %p to i32) + %p) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @test_01
;
- %p1 = getelementptr i8, i8* %p, i32 2
- %p2 = getelementptr i8, i8* %p, i32 1
- %cmp = icmp ugt i8* %p1, %p2
+ %p1 = getelementptr i8, ptr %p, i32 2
+ %p2 = getelementptr i8, ptr %p, i32 1
+ %cmp = icmp ugt ptr %p1, %p2
%index = select i1 %cmp, i32 2, i32 1
%neg_index = sub i32 0, %index
- %gep = getelementptr i8, i8* %p, i32 %neg_index
- ret i8* %gep
+ %gep = getelementptr i8, ptr %p, i32 %neg_index
+ ret ptr %gep
}
-define i8* @test_02(i8* %p) {
+define ptr @test_02(ptr %p) {
; CHECK-LABEL: 'test_02'
; CHECK-NEXT: Classifying expressions for: @test_02
-; CHECK-NEXT: %p1 = getelementptr i8, i8* %p, i32 2
+; CHECK-NEXT: %p1 = getelementptr i8, ptr %p, i32 2
; CHECK-NEXT: --> (2 + %p) U: full-set S: full-set
-; CHECK-NEXT: %p2 = getelementptr i8, i8* %p, i32 1
+; CHECK-NEXT: %p2 = getelementptr i8, ptr %p, i32 1
; CHECK-NEXT: --> (1 + %p) U: full-set S: full-set
; CHECK-NEXT: %index = select i1 %cmp, i32 2, i32 1
-; CHECK-NEXT: --> ((-1 * (ptrtoint i8* %p to i32)) + ((1 + (ptrtoint i8* %p to i32)) smax (2 + (ptrtoint i8* %p to i32)))) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * (ptrtoint ptr %p to i32)) + ((1 + (ptrtoint ptr %p to i32)) smax (2 + (ptrtoint ptr %p to i32)))) U: full-set S: full-set
; CHECK-NEXT: %neg_index = sub i32 0, %index
-; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint i8* %p to i32)) smax (2 + (ptrtoint i8* %p to i32)))) + (ptrtoint i8* %p to i32)) U: full-set S: full-set
-; CHECK-NEXT: %gep = getelementptr i8, i8* %p, i32 %neg_index
-; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint i8* %p to i32)) smax (2 + (ptrtoint i8* %p to i32)))) + (ptrtoint i8* %p to i32) + %p) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint ptr %p to i32)) smax (2 + (ptrtoint ptr %p to i32)))) + (ptrtoint ptr %p to i32)) U: full-set S: full-set
+; CHECK-NEXT: %gep = getelementptr i8, ptr %p, i32 %neg_index
+; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint ptr %p to i32)) smax (2 + (ptrtoint ptr %p to i32)))) + (ptrtoint ptr %p to i32) + %p) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @test_02
;
- %p1 = getelementptr i8, i8* %p, i32 2
- %p2 = getelementptr i8, i8* %p, i32 1
- %cmp = icmp sgt i8* %p1, %p2
+ %p1 = getelementptr i8, ptr %p, i32 2
+ %p2 = getelementptr i8, ptr %p, i32 1
+ %cmp = icmp sgt ptr %p1, %p2
%index = select i1 %cmp, i32 2, i32 1
%neg_index = sub i32 0, %index
- %gep = getelementptr i8, i8* %p, i32 %neg_index
- ret i8* %gep
+ %gep = getelementptr i8, ptr %p, i32 %neg_index
+ ret ptr %gep
}
-define i8* @test_03(i8* %p) {
+define ptr @test_03(ptr %p) {
; CHECK-LABEL: 'test_03'
; CHECK-NEXT: Classifying expressions for: @test_03
-; CHECK-NEXT: %p1 = getelementptr i8, i8* %p, i32 2
+; CHECK-NEXT: %p1 = getelementptr i8, ptr %p, i32 2
; CHECK-NEXT: --> (2 + %p) U: full-set S: full-set
-; CHECK-NEXT: %p2 = getelementptr i8, i8* %p, i32 1
+; CHECK-NEXT: %p2 = getelementptr i8, ptr %p, i32 1
; CHECK-NEXT: --> (1 + %p) U: full-set S: full-set
; CHECK-NEXT: %index = select i1 %cmp, i32 2, i32 1
-; CHECK-NEXT: --> ((-1 * (ptrtoint i8* %p to i32)) + ((1 + (ptrtoint i8* %p to i32)) umin (2 + (ptrtoint i8* %p to i32)))) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * (ptrtoint ptr %p to i32)) + ((1 + (ptrtoint ptr %p to i32)) umin (2 + (ptrtoint ptr %p to i32)))) U: full-set S: full-set
; CHECK-NEXT: %neg_index = sub i32 0, %index
-; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint i8* %p to i32)) umin (2 + (ptrtoint i8* %p to i32)))) + (ptrtoint i8* %p to i32)) U: full-set S: full-set
-; CHECK-NEXT: %gep = getelementptr i8, i8* %p, i32 %neg_index
-; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint i8* %p to i32)) umin (2 + (ptrtoint i8* %p to i32)))) + (ptrtoint i8* %p to i32) + %p) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint ptr %p to i32)) umin (2 + (ptrtoint ptr %p to i32)))) + (ptrtoint ptr %p to i32)) U: full-set S: full-set
+; CHECK-NEXT: %gep = getelementptr i8, ptr %p, i32 %neg_index
+; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint ptr %p to i32)) umin (2 + (ptrtoint ptr %p to i32)))) + (ptrtoint ptr %p to i32) + %p) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @test_03
;
- %p1 = getelementptr i8, i8* %p, i32 2
- %p2 = getelementptr i8, i8* %p, i32 1
- %cmp = icmp ult i8* %p1, %p2
+ %p1 = getelementptr i8, ptr %p, i32 2
+ %p2 = getelementptr i8, ptr %p, i32 1
+ %cmp = icmp ult ptr %p1, %p2
%index = select i1 %cmp, i32 2, i32 1
%neg_index = sub i32 0, %index
- %gep = getelementptr i8, i8* %p, i32 %neg_index
- ret i8* %gep
+ %gep = getelementptr i8, ptr %p, i32 %neg_index
+ ret ptr %gep
}
-define i8* @test_04(i8* %p) {
+define ptr @test_04(ptr %p) {
; CHECK-LABEL: 'test_04'
; CHECK-NEXT: Classifying expressions for: @test_04
-; CHECK-NEXT: %p1 = getelementptr i8, i8* %p, i32 2
+; CHECK-NEXT: %p1 = getelementptr i8, ptr %p, i32 2
; CHECK-NEXT: --> (2 + %p) U: full-set S: full-set
-; CHECK-NEXT: %p2 = getelementptr i8, i8* %p, i32 1
+; CHECK-NEXT: %p2 = getelementptr i8, ptr %p, i32 1
; CHECK-NEXT: --> (1 + %p) U: full-set S: full-set
; CHECK-NEXT: %index = select i1 %cmp, i32 2, i32 1
-; CHECK-NEXT: --> ((-1 * (ptrtoint i8* %p to i32)) + ((1 + (ptrtoint i8* %p to i32)) smin (2 + (ptrtoint i8* %p to i32)))) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * (ptrtoint ptr %p to i32)) + ((1 + (ptrtoint ptr %p to i32)) smin (2 + (ptrtoint ptr %p to i32)))) U: full-set S: full-set
; CHECK-NEXT: %neg_index = sub i32 0, %index
-; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint i8* %p to i32)) smin (2 + (ptrtoint i8* %p to i32)))) + (ptrtoint i8* %p to i32)) U: full-set S: full-set
-; CHECK-NEXT: %gep = getelementptr i8, i8* %p, i32 %neg_index
-; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint i8* %p to i32)) smin (2 + (ptrtoint i8* %p to i32)))) + (ptrtoint i8* %p to i32) + %p) U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint ptr %p to i32)) smin (2 + (ptrtoint ptr %p to i32)))) + (ptrtoint ptr %p to i32)) U: full-set S: full-set
+; CHECK-NEXT: %gep = getelementptr i8, ptr %p, i32 %neg_index
+; CHECK-NEXT: --> ((-1 * ((1 + (ptrtoint ptr %p to i32)) smin (2 + (ptrtoint ptr %p to i32)))) + (ptrtoint ptr %p to i32) + %p) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @test_04
;
- %p1 = getelementptr i8, i8* %p, i32 2
- %p2 = getelementptr i8, i8* %p, i32 1
- %cmp = icmp slt i8* %p1, %p2
+ %p1 = getelementptr i8, ptr %p, i32 2
+ %p2 = getelementptr i8, ptr %p, i32 1
+ %cmp = icmp slt ptr %p1, %p2
%index = select i1 %cmp, i32 2, i32 1
%neg_index = sub i32 0, %index
- %gep = getelementptr i8, i8* %p, i32 %neg_index
- ret i8* %gep
+ %gep = getelementptr i8, ptr %p, i32 %neg_index
+ ret ptr %gep
}
attributes #0 = { nofree }
diff --git a/llvm/test/Analysis/ScalarEvolution/pr51869-scalar-evolution-prove-implications-via-truncation.ll b/llvm/test/Analysis/ScalarEvolution/pr51869-scalar-evolution-prove-implications-via-truncation.ll
index ec732ec3d38d9..0b7d0bd0a39d9 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr51869-scalar-evolution-prove-implications-via-truncation.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr51869-scalar-evolution-prove-implications-via-truncation.ll
@@ -14,11 +14,11 @@
@v_228 = external dso_local global i32, align 1
; Function Attrs: nounwind
-define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5, i16* %6, i16* %7, i16* %8, i16* %9, i16* %10, i1 %11) #0 {
+define dso_local i16 @main(ptr %0, ptr %1, ptr %2, ptr %3, ptr %4, ptr %5, ptr %6, ptr %7, ptr %8, ptr %9, ptr %10, i1 %11) #0 {
br i1 %11, label %27, label %13
13: ; preds = %12
- %14 = load i32, i32* @v_228, align 1
+ %14 = load i32, ptr @v_228, align 1
%15 = trunc i32 %14 to i16
%16 = mul i16 %15, 2
%17 = sub i16 10, %16
@@ -36,7 +36,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %26, label %139, label %27
27: ; preds = %21, %12
- %28 = load i16, i16* %1, align 1
+ %28 = load i16, ptr %1, align 1
br label %29
29: ; preds = %29, %27
@@ -46,7 +46,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %32, label %29, label %33
33: ; preds = %29
- %34 = load i16, i16* %2, align 1
+ %34 = load i16, ptr %2, align 1
br label %35
35: ; preds = %43, %33
@@ -65,7 +65,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %45, label %35, label %46
46: ; preds = %43
- %47 = load i16, i16* %3, align 1
+ %47 = load i16, ptr %3, align 1
br label %48
48: ; preds = %55, %46
@@ -83,7 +83,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %57, label %48, label %58
58: ; preds = %55
- %59 = load i16, i16* %4, align 1
+ %59 = load i16, ptr %4, align 1
br label %60
60: ; preds = %67, %58
@@ -101,7 +101,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %69, label %60, label %70
70: ; preds = %67
- %71 = load i16, i16* %5, align 1
+ %71 = load i16, ptr %5, align 1
br label %72
72: ; preds = %79, %70
@@ -119,7 +119,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %81, label %72, label %82
82: ; preds = %79
- %83 = load i16, i16* %6, align 1
+ %83 = load i16, ptr %6, align 1
br label %84
84: ; preds = %91, %82
@@ -137,7 +137,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %93, label %84, label %94
94: ; preds = %91
- %95 = load i16, i16* %7, align 1
+ %95 = load i16, ptr %7, align 1
br label %96
96: ; preds = %103, %94
@@ -155,7 +155,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %105, label %96, label %106
106: ; preds = %103
- %107 = load i16, i16* %8, align 1
+ %107 = load i16, ptr %8, align 1
br label %108
108: ; preds = %115, %106
@@ -173,7 +173,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %117, label %108, label %118
118: ; preds = %115
- %119 = load i16, i16* %9, align 1
+ %119 = load i16, ptr %9, align 1
br label %120
120: ; preds = %128, %118
@@ -192,7 +192,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %130, label %120, label %131
131: ; preds = %128
- %132 = load i16, i16* %10, align 1
+ %132 = load i16, ptr %10, align 1
br label %133
133: ; preds = %133, %131
@@ -202,7 +202,7 @@ define dso_local i16 @main(i16* %0, i16* %1, i16* %2, i16* %3, i16* %4, i16* %5,
br i1 %136, label %133, label %137
137: ; preds = %133
- %138 = load i16, i16* %0, align 1
+ %138 = load i16, ptr %0, align 1
ret i16 %138
139: ; preds = %120, %108, %96, %84, %72, %60, %48, %35, %21, %13
diff --git a/llvm/test/Analysis/ScalarEvolution/predicated-trip-count.ll b/llvm/test/Analysis/ScalarEvolution/predicated-trip-count.ll
index 0d2a655cebac9..7764cf45df6b8 100644
--- a/llvm/test/Analysis/ScalarEvolution/predicated-trip-count.ll
+++ b/llvm/test/Analysis/ScalarEvolution/predicated-trip-count.ll
@@ -21,7 +21,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define void @test1(i32 %N, i32 %M) {
; CHECK-LABEL: 'test1'
; CHECK-NEXT: Classifying expressions for: @test1
-; CHECK-NEXT: %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i16 %i.0
+; CHECK-NEXT: %tmp = getelementptr [1000 x i32], ptr @A, i32 0, i16 %i.0
; CHECK-NEXT: --> ((4 * (sext i16 {0,+,1}<%bb3> to i64))<nsw> + @A) U: [0,-3) S: [-9223372036854775808,9223372036854775805) Exits: <<Unknown>> LoopDispositions: { %bb3: Computable }
; CHECK-NEXT: %tmp2 = add i16 %i.0, 1
; CHECK-NEXT: --> {1,+,1}<%bb3> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb3: Computable }
@@ -41,8 +41,8 @@ entry:
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i16 %i.0 ; <i32*> [#uses=1]
- store i32 123, i32* %tmp
+ %tmp = getelementptr [1000 x i32], ptr @A, i32 0, i16 %i.0 ; <ptr> [#uses=1]
+ store i32 123, ptr %tmp
%tmp2 = add i16 %i.0, 1 ; <i32> [#uses=1]
br label %bb3
@@ -88,7 +88,7 @@ return: ; preds = %bb5
define void @test2(i32 %N, i32 %M, i16 %Start) {
; CHECK-LABEL: 'test2'
; CHECK-NEXT: Classifying expressions for: @test2
-; CHECK-NEXT: %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i16 %i.0
+; CHECK-NEXT: %tmp = getelementptr [1000 x i32], ptr @A, i32 0, i16 %i.0
; CHECK-NEXT: --> ((4 * (sext i16 {%Start,+,-1}<%bb3> to i64))<nsw> + @A) U: [0,-3) S: [-9223372036854775808,9223372036854775805) Exits: <<Unknown>> LoopDispositions: { %bb3: Computable }
; CHECK-NEXT: %tmp2 = sub i16 %i.0, 1
; CHECK-NEXT: --> {(-1 + %Start),+,-1}<%bb3> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb3: Computable }
@@ -108,8 +108,8 @@ entry:
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i16 %i.0 ; <i32*> [#uses=1]
- store i32 123, i32* %tmp
+ %tmp = getelementptr [1000 x i32], ptr @A, i32 0, i16 %i.0 ; <ptr> [#uses=1]
+ store i32 123, ptr %tmp
%tmp2 = sub i16 %i.0, 1 ; <i32> [#uses=1]
br label %bb3
diff --git a/llvm/test/Analysis/ScalarEvolution/range-signedness.ll b/llvm/test/Analysis/ScalarEvolution/range-signedness.ll
index b5fc033096df9..452e880e2ae85 100644
--- a/llvm/test/Analysis/ScalarEvolution/range-signedness.ll
+++ b/llvm/test/Analysis/ScalarEvolution/range-signedness.ll
@@ -1,6 +1,6 @@
; RUN: opt -disable-output "-passes=print<scalar-evolution>" < %s 2>&1 | FileCheck %s
-define void @x(i1* %cond) {
+define void @x(ptr %cond) {
; CHECK-LABEL: Classifying expressions for: @x
entry:
br label %loop
@@ -12,14 +12,14 @@ define void @x(i1* %cond) {
%idx.inc = add nsw i8 %idx, 1
- %c = load volatile i1, i1* %cond
+ %c = load volatile i1, ptr %cond
br i1 %c, label %loop, label %exit
exit:
ret void
}
-define void @y(i8* %addr) {
+define void @y(ptr %addr) {
; CHECK-LABEL: Classifying expressions for: @y
entry:
br label %loop
diff --git a/llvm/test/Analysis/ScalarEvolution/range_nw_flag.ll b/llvm/test/Analysis/ScalarEvolution/range_nw_flag.ll
index 4d32876de089e..12887117598b7 100644
--- a/llvm/test/Analysis/ScalarEvolution/range_nw_flag.ll
+++ b/llvm/test/Analysis/ScalarEvolution/range_nw_flag.ll
@@ -2,7 +2,7 @@
; RUN: opt < %s -S -disable-output "-passes=print<scalar-evolution>" 2>&1 | FileCheck %s
; copied from flags-from-poison.ll
-define void @test-add-nuw(float* %input, i32 %offset, i32 %numIterations) {
+define void @test-add-nuw(ptr %input, i32 %offset, i32 %numIterations) {
; CHECK-LABEL: 'test-add-nuw'
; CHECK-NEXT: Classifying expressions for: @test-add-nuw
; CHECK-NEXT: %i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
@@ -11,7 +11,7 @@ define void @test-add-nuw(float* %input, i32 %offset, i32 %numIterations) {
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: %numIterations LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %index32 = add nuw i32 %nexti, %offset
; CHECK-NEXT: --> {(1 + %offset)<nuw>,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: (%offset + %numIterations) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %ptr = getelementptr inbounds float, float* %input, i32 %index32
+; CHECK-NEXT: %ptr = getelementptr inbounds float, ptr %input, i32 %index32
; CHECK-NEXT: --> ((4 * (sext i32 {(1 + %offset)<nuw>,+,1}<nuw><%loop> to i64))<nsw> + %input) U: full-set S: full-set Exits: ((4 * (sext i32 (%offset + %numIterations) to i64))<nsw> + %input) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test-add-nuw
; CHECK-NEXT: Loop %loop: backedge-taken count is (-1 + %numIterations)
@@ -27,8 +27,8 @@ loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
%nexti = add nuw i32 %i, 1
%index32 = add nuw i32 %nexti, %offset
- %ptr = getelementptr inbounds float, float* %input, i32 %index32
- %f = load float, float* %ptr, align 4
+ %ptr = getelementptr inbounds float, ptr %input, i32 %index32
+ %f = load float, ptr %ptr, align 4
%exitcond = icmp eq i32 %nexti, %numIterations
br i1 %exitcond, label %exit, label %loop
@@ -36,7 +36,7 @@ exit:
ret void
}
-define void @test-addrec-nuw(float* %input, i32 %offset, i32 %numIterations) {
+define void @test-addrec-nuw(ptr %input, i32 %offset, i32 %numIterations) {
; CHECK-LABEL: 'test-addrec-nuw'
; CHECK-NEXT: Classifying expressions for: @test-addrec-nuw
; CHECK-NEXT: %min.10 = select i1 %cmp, i32 %offset, i32 10
@@ -47,7 +47,7 @@ define void @test-addrec-nuw(float* %input, i32 %offset, i32 %numIterations) {
; CHECK-NEXT: --> {1,+,1}<nuw><%loop> U: [1,0) S: [1,0) Exits: %numIterations LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %index32 = add nuw i32 %nexti, %min.10
; CHECK-NEXT: --> {(1 + (10 smax %offset))<nuw>,+,1}<nuw><%loop> U: [11,0) S: [11,0) Exits: ((10 smax %offset) + %numIterations) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %ptr = getelementptr inbounds float, float* %input, i32 %index32
+; CHECK-NEXT: %ptr = getelementptr inbounds float, ptr %input, i32 %index32
; CHECK-NEXT: --> ((4 * (sext i32 {(1 + (10 smax %offset))<nuw>,+,1}<nuw><%loop> to i64))<nsw> + %input) U: full-set S: full-set Exits: ((4 * (sext i32 ((10 smax %offset) + %numIterations) to i64))<nsw> + %input) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test-addrec-nuw
; CHECK-NEXT: Loop %loop: backedge-taken count is (-1 + %numIterations)
@@ -65,8 +65,8 @@ loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
%nexti = add nuw i32 %i, 1
%index32 = add nuw i32 %nexti, %min.10
- %ptr = getelementptr inbounds float, float* %input, i32 %index32
- %f = load float, float* %ptr, align 4
+ %ptr = getelementptr inbounds float, ptr %input, i32 %index32
+ %f = load float, ptr %ptr, align 4
%exitcond = icmp eq i32 %nexti, %numIterations
br i1 %exitcond, label %exit, label %loop
@@ -74,7 +74,7 @@ exit:
ret void
}
-define void @test-addrec-nsw-start-neg-strip-neg(float* %input, i32 %offset, i32 %numIterations) {
+define void @test-addrec-nsw-start-neg-strip-neg(ptr %input, i32 %offset, i32 %numIterations) {
; CHECK-LABEL: 'test-addrec-nsw-start-neg-strip-neg'
; CHECK-NEXT: Classifying expressions for: @test-addrec-nsw-start-neg-strip-neg
; CHECK-NEXT: %max = select i1 %cmp, i32 %offset, i32 -10
@@ -85,7 +85,7 @@ define void @test-addrec-nsw-start-neg-strip-neg(float* %input, i32 %offset, i32
; CHECK-NEXT: --> {-1,+,-1}<nsw><%loop> U: [-2147483648,0) S: [-2147483648,0) Exits: %numIterations LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %index32 = add nsw i32 %nexti, %max
; CHECK-NEXT: --> {(-1 + (-10 smin %offset))<nsw>,+,-1}<nsw><%loop> U: [-2147483648,-10) S: [-2147483648,-10) Exits: ((-10 smin %offset) + %numIterations) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %ptr = getelementptr inbounds float, float* %input, i32 %index32
+; CHECK-NEXT: %ptr = getelementptr inbounds float, ptr %input, i32 %index32
; CHECK-NEXT: --> {(-4 + (4 * (sext i32 (-10 smin %offset) to i64))<nsw> + %input),+,-4}<nw><%loop> U: full-set S: full-set Exits: (-4 + (4 * (sext i32 (-10 smin %offset) to i64))<nsw> + (-4 * (zext i32 (-1 + (-1 * %numIterations)) to i64))<nsw> + %input) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test-addrec-nsw-start-neg-strip-neg
; CHECK-NEXT: Loop %loop: backedge-taken count is (-1 + (-1 * %numIterations))
@@ -103,8 +103,8 @@ loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
%nexti = add nsw i32 %i, -1
%index32 = add nsw i32 %nexti, %max
- %ptr = getelementptr inbounds float, float* %input, i32 %index32
- %f = load float, float* %ptr, align 4
+ %ptr = getelementptr inbounds float, ptr %input, i32 %index32
+ %f = load float, ptr %ptr, align 4
%exitcond = icmp eq i32 %nexti, %numIterations
br i1 %exitcond, label %exit, label %loop
@@ -112,7 +112,7 @@ exit:
ret void
}
-define void @test-addrec-nsw-start-pos-strip-neg(float* %input, i32 %offset, i32 %numIterations) {
+define void @test-addrec-nsw-start-pos-strip-neg(ptr %input, i32 %offset, i32 %numIterations) {
; CHECK-LABEL: 'test-addrec-nsw-start-pos-strip-neg'
; CHECK-NEXT: Classifying expressions for: @test-addrec-nsw-start-pos-strip-neg
; CHECK-NEXT: %max = select i1 %cmp, i32 %offset, i32 10
@@ -123,7 +123,7 @@ define void @test-addrec-nsw-start-pos-strip-neg(float* %input, i32 %offset, i32
; CHECK-NEXT: --> {-1,+,-1}<nsw><%loop> U: [-2147483648,0) S: [-2147483648,0) Exits: %numIterations LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %index32 = add nsw i32 %nexti, %max
; CHECK-NEXT: --> {(-1 + (10 smin %offset))<nsw>,+,-1}<nsw><%loop> U: [-2147483648,10) S: [-2147483648,10) Exits: ((10 smin %offset) + %numIterations) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %ptr = getelementptr inbounds float, float* %input, i32 %index32
+; CHECK-NEXT: %ptr = getelementptr inbounds float, ptr %input, i32 %index32
; CHECK-NEXT: --> {(-4 + (4 * (sext i32 (10 smin %offset) to i64))<nsw> + %input),+,-4}<nw><%loop> U: full-set S: full-set Exits: (-4 + (4 * (sext i32 (10 smin %offset) to i64))<nsw> + (-4 * (zext i32 (-1 + (-1 * %numIterations)) to i64))<nsw> + %input) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test-addrec-nsw-start-pos-strip-neg
; CHECK-NEXT: Loop %loop: backedge-taken count is (-1 + (-1 * %numIterations))
@@ -141,8 +141,8 @@ loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
%nexti = add nsw i32 %i, -1
%index32 = add nsw i32 %nexti, %max
- %ptr = getelementptr inbounds float, float* %input, i32 %index32
- %f = load float, float* %ptr, align 4
+ %ptr = getelementptr inbounds float, ptr %input, i32 %index32
+ %f = load float, ptr %ptr, align 4
%exitcond = icmp eq i32 %nexti, %numIterations
br i1 %exitcond, label %exit, label %loop
@@ -150,7 +150,7 @@ exit:
ret void
}
-define void @test-addrec-nsw-start-pos-strip-pos(float* %input, i32 %offset, i32 %numIterations) {
+define void @test-addrec-nsw-start-pos-strip-pos(ptr %input, i32 %offset, i32 %numIterations) {
; CHECK-LABEL: 'test-addrec-nsw-start-pos-strip-pos'
; CHECK-NEXT: Classifying expressions for: @test-addrec-nsw-start-pos-strip-pos
; CHECK-NEXT: %min = select i1 %cmp, i32 %offset, i32 10
@@ -161,7 +161,7 @@ define void @test-addrec-nsw-start-pos-strip-pos(float* %input, i32 %offset, i32
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: %numIterations LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %index32 = add nsw i32 %nexti, %min
; CHECK-NEXT: --> {(1 + (10 smax %offset))<nuw><nsw>,+,1}<nuw><nsw><%loop> U: [11,-2147483648) S: [11,-2147483648) Exits: ((10 smax %offset) + %numIterations) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %ptr = getelementptr inbounds float, float* %input, i32 %index32
+; CHECK-NEXT: %ptr = getelementptr inbounds float, ptr %input, i32 %index32
; CHECK-NEXT: --> {(4 + (4 * (zext i32 (10 smax %offset) to i64))<nuw><nsw> + %input)<nuw>,+,4}<nuw><%loop> U: [44,0) S: [44,0) Exits: (4 + (4 * (zext i32 (-1 + %numIterations) to i64))<nuw><nsw> + (4 * (zext i32 (10 smax %offset) to i64))<nuw><nsw> + %input) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test-addrec-nsw-start-pos-strip-pos
; CHECK-NEXT: Loop %loop: backedge-taken count is (-1 + %numIterations)
@@ -179,8 +179,8 @@ loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
%nexti = add nsw i32 %i, 1
%index32 = add nsw i32 %nexti, %min
- %ptr = getelementptr inbounds float, float* %input, i32 %index32
- %f = load float, float* %ptr, align 4
+ %ptr = getelementptr inbounds float, ptr %input, i32 %index32
+ %f = load float, ptr %ptr, align 4
%exitcond = icmp eq i32 %nexti, %numIterations
br i1 %exitcond, label %exit, label %loop
@@ -188,7 +188,7 @@ exit:
ret void
}
-define void @test-addrec-nsw-start-neg-strip-pos(float* %input, i32 %offset, i32 %numIterations) {
+define void @test-addrec-nsw-start-neg-strip-pos(ptr %input, i32 %offset, i32 %numIterations) {
; CHECK-LABEL: 'test-addrec-nsw-start-neg-strip-pos'
; CHECK-NEXT: Classifying expressions for: @test-addrec-nsw-start-neg-strip-pos
; CHECK-NEXT: %min = select i1 %cmp, i32 %offset, i32 -10
@@ -199,7 +199,7 @@ define void @test-addrec-nsw-start-neg-strip-pos(float* %input, i32 %offset, i32
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: %numIterations LoopDispositions: { %loop: Computable }
; CHECK-NEXT: %index32 = add nsw i32 %nexti, %min
; CHECK-NEXT: --> {(1 + (-10 smax %offset))<nsw>,+,1}<nsw><%loop> U: [-9,-2147483648) S: [-9,-2147483648) Exits: ((-10 smax %offset) + %numIterations) LoopDispositions: { %loop: Computable }
-; CHECK-NEXT: %ptr = getelementptr inbounds float, float* %input, i32 %index32
+; CHECK-NEXT: %ptr = getelementptr inbounds float, ptr %input, i32 %index32
; CHECK-NEXT: --> {(4 + (4 * (sext i32 (-10 smax %offset) to i64))<nsw> + %input),+,4}<nw><%loop> U: full-set S: full-set Exits: (4 + (4 * (zext i32 (-1 + %numIterations) to i64))<nuw><nsw> + (4 * (sext i32 (-10 smax %offset) to i64))<nsw> + %input) LoopDispositions: { %loop: Computable }
; CHECK-NEXT: Determining loop execution counts for: @test-addrec-nsw-start-neg-strip-pos
; CHECK-NEXT: Loop %loop: backedge-taken count is (-1 + %numIterations)
@@ -217,8 +217,8 @@ loop:
%i = phi i32 [ %nexti, %loop ], [ 0, %entry ]
%nexti = add nsw i32 %i, 1
%index32 = add nsw i32 %nexti, %min
- %ptr = getelementptr inbounds float, float* %input, i32 %index32
- %f = load float, float* %ptr, align 4
+ %ptr = getelementptr inbounds float, ptr %input, i32 %index32
+ %f = load float, ptr %ptr, align 4
%exitcond = icmp eq i32 %nexti, %numIterations
br i1 %exitcond, label %exit, label %loop
diff --git a/llvm/test/Analysis/ScalarEvolution/ranges.ll b/llvm/test/Analysis/ScalarEvolution/ranges.ll
index ab307000dd59f..868462a3f4240 100644
--- a/llvm/test/Analysis/ScalarEvolution/ranges.ll
+++ b/llvm/test/Analysis/ScalarEvolution/ranges.ll
@@ -28,12 +28,12 @@ define i32 @ashr(i32 %a) {
define i64 @ashr_global() {
; CHECK-LABEL: 'ashr_global'
; CHECK-NEXT: Classifying expressions for: @ashr_global
-; CHECK-NEXT: %ashr = ashr i64 ptrtoint (i8* @G to i64), 63
+; CHECK-NEXT: %ashr = ashr i64 ptrtoint (ptr @G to i64), 63
; CHECK-NEXT: --> %ashr U: [-1,1) S: [-1,1)
; CHECK-NEXT: Determining loop execution counts for: @ashr_global
;
- %ashr = ashr i64 ptrtoint (i8* @G to i64), 63
- %pos = icmp sge i8* @G, null
+ %ashr = ashr i64 ptrtoint (ptr @G to i64), 63
+ %pos = icmp sge ptr @G, null
call void @llvm.assume(i1 %pos)
ret i64 %ashr
}
diff --git a/llvm/test/Analysis/ScalarEvolution/returned.ll b/llvm/test/Analysis/ScalarEvolution/returned.ll
index 16d2b5ec9b9e9..008dfc4211aad 100644
--- a/llvm/test/Analysis/ScalarEvolution/returned.ll
+++ b/llvm/test/Analysis/ScalarEvolution/returned.ll
@@ -1,16 +1,16 @@
; RUN: opt < %s -S -disable-output "-passes=print<scalar-evolution>" 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-define i8* @foo(i32 %no, i8* nocapture %d) nounwind {
+define ptr @foo(i32 %no, ptr nocapture %d) nounwind {
entry:
- %v = call i8* @func1(i8* %d)
- %w = getelementptr i8, i8* %v, i64 5
- ret i8* %w
+ %v = call ptr @func1(ptr %d)
+ %w = getelementptr i8, ptr %v, i64 5
+ ret ptr %w
}
; CHECK-LABEL: Classifying expressions for: @foo
-; CHECK: %w = getelementptr i8, i8* %v, i64 5
+; CHECK: %w = getelementptr i8, ptr %v, i64 5
; CHECK-NEXT: (5 + %d)
-declare i8* @func1(i8* returned) nounwind argmemonly
+declare ptr @func1(ptr returned) nounwind argmemonly
diff --git a/llvm/test/Analysis/ScalarEvolution/scalable-vector.ll b/llvm/test/Analysis/ScalarEvolution/scalable-vector.ll
index 8a1ce93c32748..798a36023235d 100644
--- a/llvm/test/Analysis/ScalarEvolution/scalable-vector.ll
+++ b/llvm/test/Analysis/ScalarEvolution/scalable-vector.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt "-passes=print<scalar-evolution>" -disable-output < %s 2>&1 | FileCheck %s
-define void @a(<vscale x 1 x i64> *%p) {
+define void @a(ptr %p) {
; CHECK-LABEL: 'a'
; CHECK-NEXT: Classifying expressions for: @a
-; CHECK-NEXT: %1 = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* null, i32 3
+; CHECK-NEXT: %1 = getelementptr <vscale x 4 x i32>, ptr null, i32 3
; CHECK-NEXT: --> ((3 * sizeof(<vscale x 4 x i32>)) + null) U: [0,-15) S: [-9223372036854775808,9223372036854775793)
-; CHECK-NEXT: %2 = getelementptr <vscale x 1 x i64>, <vscale x 1 x i64>* %p, i32 1
+; CHECK-NEXT: %2 = getelementptr <vscale x 1 x i64>, ptr %p, i32 1
; CHECK-NEXT: --> (sizeof(<vscale x 1 x i64>) + %p) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @a
;
- getelementptr <vscale x 4 x i32>, <vscale x 4 x i32> *null, i32 3
- getelementptr <vscale x 1 x i64>, <vscale x 1 x i64> *%p, i32 1
+ getelementptr <vscale x 4 x i32>, ptr null, i32 3
+ getelementptr <vscale x 1 x i64>, ptr %p, i32 1
ret void
}
diff --git a/llvm/test/Analysis/ScalarEvolution/scev-dispositions.ll b/llvm/test/Analysis/ScalarEvolution/scev-dispositions.ll
index 7b083d6bb2116..19bc48157d4ad 100644
--- a/llvm/test/Analysis/ScalarEvolution/scev-dispositions.ll
+++ b/llvm/test/Analysis/ScalarEvolution/scev-dispositions.ll
@@ -1,6 +1,6 @@
; RUN: opt -disable-output "-passes=print<scalar-evolution>" < %s 2>&1 | FileCheck %s
-define void @single_loop(i32* %buf, i32 %start) {
+define void @single_loop(ptr %buf, i32 %start) {
; CHECK-LABEL: Classifying expressions for: @single_loop
entry:
%val = add i32 %start, 400
@@ -18,7 +18,7 @@ define void @single_loop(i32* %buf, i32 %start) {
; CHECK-NEXT: --> {{.*}} LoopDispositions: { %loop: Invariant }
; CHECK: %idx.inc = add nsw i32 %idx, 1
; CHECK-NEXT: --> {{.*}} LoopDispositions: { %loop: Computable }
-; CHECK: %val3 = load volatile i32, i32* %buf
+; CHECK: %val3 = load volatile i32, ptr %buf
; CHECK-NEXT: --> {{.*}} LoopDispositions: { %loop: Variant }
%val2 = add i32 %start, 400
@@ -26,7 +26,7 @@ define void @single_loop(i32* %buf, i32 %start) {
%idx.inc.sext = sext i32 %idx.inc to i64
%condition = icmp eq i32 %counter, 1
%counter.inc = add i32 %counter, 1
- %val3 = load volatile i32, i32* %buf
+ %val3 = load volatile i32, ptr %buf
br i1 %condition, label %exit, label %loop
exit:
@@ -34,7 +34,7 @@ define void @single_loop(i32* %buf, i32 %start) {
}
-define void @nested_loop(double* %p, i64 %m) {
+define void @nested_loop(ptr %p, i64 %m) {
; CHECK-LABEL: Classifying expressions for: @nested_loop
; CHECK: %j = phi i64 [ 0, %entry ], [ %j.next, %outer.latch ]
diff --git a/llvm/test/Analysis/ScalarEvolution/scev-expander-existing-value-offset.ll b/llvm/test/Analysis/ScalarEvolution/scev-expander-existing-value-offset.ll
index 7af320fdc9a90..5bdfd19639210 100644
--- a/llvm/test/Analysis/ScalarEvolution/scev-expander-existing-value-offset.ll
+++ b/llvm/test/Analysis/ScalarEvolution/scev-expander-existing-value-offset.ll
@@ -4,7 +4,7 @@
; CHECK: select
; CHECK-NOT: select
-@ySrcL = common global i8* null, align 8
+@ySrcL = common global ptr null, align 8
@smL = common global i32 0, align 4
define void @foo(i32 %rwL, i32 %kL, i32 %xfL) {
@@ -17,7 +17,7 @@ entry:
br i1 %cmp6, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
- %tmp = load i8*, i8** @ySrcL, align 8
+ %tmp = load ptr, ptr @ySrcL, align 8
%tmp1 = sext i32 %kL to i64
%tmp2 = sext i32 %cond.i to i64
br label %for.body
@@ -25,8 +25,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvars.iv = phi i64 [ %tmp1, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%reduct.07 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i8, i8* %tmp, i64 %indvars.iv
- %tmp3 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds i8, ptr %tmp, i64 %indvars.iv
+ %tmp3 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %tmp3 to i32
%add = add nsw i32 %conv, %reduct.07
%indvars.iv.next = add nsw i64 %indvars.iv, 1
@@ -39,6 +39,6 @@ for.end.loopexit: ; preds = %for.body
for.end: ; preds = %for.end.loopexit, %entry
%reduct.0.lcssa = phi i32 [ 0, %entry ], [ %add.lcssa, %for.end.loopexit ]
- store i32 %reduct.0.lcssa, i32* @smL, align 4
+ store i32 %reduct.0.lcssa, ptr @smL, align 4
ret void
}
diff --git a/llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-vect.ll b/llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-vect.ll
index ef8a6f49a8353..c092d8c1d93ca 100644
--- a/llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-vect.ll
+++ b/llvm/test/Analysis/ScalarEvolution/scev-expander-reuse-vect.ll
@@ -22,8 +22,8 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%total.011 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds [1000 x i16], [1000 x i16]* @a, i64 0, i64 %indvars.iv
- %tmp1 = load i16, i16* %arrayidx, align 2
+ %arrayidx = getelementptr inbounds [1000 x i16], ptr @a, i64 0, i64 %indvars.iv
+ %tmp1 = load i16, ptr %arrayidx, align 2
%conv = sext i16 %tmp1 to i32
%add = add nsw i32 %conv, %total.011
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
diff --git a/llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll b/llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
index ead5186ec2551..4e381b0bb3d18 100644
--- a/llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
+++ b/llvm/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
@@ -49,7 +49,7 @@ define i64 @bad.0(i32 %start, i32 %low.limit, i32 %high.limit) {
ret i64 %postinc.sext
}
-define i64 @bad.1(i32 %start, i32 %low.limit, i32 %high.limit, i1* %unknown) {
+define i64 @bad.1(i32 %start, i32 %low.limit, i32 %high.limit, ptr %unknown) {
; CHECK-LABEL: Classifying expressions for: @bad.1
entry:
%postinc.start = add i32 %start, 1
@@ -66,7 +66,7 @@ define i64 @bad.1(i32 %start, i32 %low.limit, i32 %high.limit, i1* %unknown) {
br i1 %break.early, label %continue.1, label %early.exit
continue.1:
- %cond = load volatile i1, i1* %unknown
+ %cond = load volatile i1, ptr %unknown
%idx.inc = add nsw i32 %idx, 1
br i1 %cond, label %loop, label %continue
diff --git a/llvm/test/Analysis/ScalarEvolution/sext-inreg.ll b/llvm/test/Analysis/ScalarEvolution/sext-inreg.ll
index fda68afc42b65..af5b2d1b040d9 100644
--- a/llvm/test/Analysis/ScalarEvolution/sext-inreg.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sext-inreg.ll
@@ -3,7 +3,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9.6"
-define i64 @foo(i64* nocapture %x, i64 %n) nounwind {
+define i64 @foo(ptr nocapture %x, i64 %n) nounwind {
; CHECK-LABEL: Classifying expressions for: @foo
entry:
%t0 = icmp sgt i64 %n, 0
@@ -21,8 +21,8 @@ bb:
; CHECK-SAME: Exits: (sext i59 (-199 + (trunc i64 %n to i59)) to i64)
%s1 = shl i64 %i.01, 5
%s2 = ashr i64 %s1, 5
- %t3 = getelementptr i64, i64* %x, i64 %i.01
- store i64 0, i64* %t3, align 1
+ %t3 = getelementptr i64, ptr %x, i64 %i.01
+ store i64 0, ptr %t3, align 1
%indvar.next = add i64 %i.01, 199
%exitcond = icmp eq i64 %indvar.next, %n
br i1 %exitcond, label %return, label %bb
diff --git a/llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll b/llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll
index fae94c5d5d15b..268e9b5cfb6d9 100644
--- a/llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sext-iv-0.ll
@@ -6,7 +6,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
-define void @foo(double* nocapture %x) nounwind {
+define void @foo(ptr nocapture %x) nounwind {
bb1.thread:
br label %bb1
@@ -23,12 +23,12 @@ bb1: ; preds = %bb1, %bb1.thread
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
; CHECK: %2
; CHECK-NEXT: --> {-128,+,1}<nsw><%bb1>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: 127
- %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double, double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, ptr %x, i64 %2 ; <ptr> [#uses=1]
+ %4 = load double, ptr %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
- store double %5, double* %7, align 8
+ %7 = getelementptr double, ptr %x, i64 %6 ; <ptr> [#uses=1]
+ store double %5, ptr %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
br i1 %9, label %return, label %bb1
diff --git a/llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll b/llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll
index de9613f2a397b..a1dccb74751c8 100644
--- a/llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sext-iv-1.ll
@@ -13,7 +13,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
-define void @foo0(double* nocapture %x) nounwind {
+define void @foo0(ptr nocapture %x) nounwind {
bb1.thread:
br label %bb1
@@ -22,12 +22,12 @@ bb1: ; preds = %bb1, %bb1.thread
%0 = trunc i64 %i.0.reg2mem.0 to i7 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double, double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, ptr %x, i64 %2 ; <ptr> [#uses=1]
+ %4 = load double, ptr %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i7 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
- store double %5, double* %7, align 8
+ %7 = getelementptr double, ptr %x, i64 %6 ; <ptr> [#uses=1]
+ store double %5, ptr %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
br i1 %9, label %return, label %bb1
@@ -36,7 +36,7 @@ return: ; preds = %bb1
ret void
}
-define void @foo1(double* nocapture %x) nounwind {
+define void @foo1(ptr nocapture %x) nounwind {
bb1.thread:
br label %bb1
@@ -45,12 +45,12 @@ bb1: ; preds = %bb1, %bb1.thread
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double, double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, ptr %x, i64 %2 ; <ptr> [#uses=1]
+ %4 = load double, ptr %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
- store double %5, double* %7, align 8
+ %7 = getelementptr double, ptr %x, i64 %6 ; <ptr> [#uses=1]
+ store double %5, ptr %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 128 ; <i1> [#uses=1]
br i1 %9, label %return, label %bb1
@@ -59,7 +59,7 @@ return: ; preds = %bb1
ret void
}
-define void @foo2(double* nocapture %x) nounwind {
+define void @foo2(ptr nocapture %x) nounwind {
bb1.thread:
br label %bb1
@@ -68,12 +68,12 @@ bb1: ; preds = %bb1, %bb1.thread
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double, double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, ptr %x, i64 %2 ; <ptr> [#uses=1]
+ %4 = load double, ptr %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
- store double %5, double* %7, align 8
+ %7 = getelementptr double, ptr %x, i64 %6 ; <ptr> [#uses=1]
+ store double %5, ptr %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
br i1 %9, label %return, label %bb1
@@ -82,7 +82,7 @@ return: ; preds = %bb1
ret void
}
-define void @foo3(double* nocapture %x) nounwind {
+define void @foo3(ptr nocapture %x) nounwind {
bb1.thread:
br label %bb1
@@ -91,12 +91,12 @@ bb1: ; preds = %bb1, %bb1.thread
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double, double* %3, align 8 ; <double> [#uses=1]
+ %3 = getelementptr double, ptr %x, i64 %2 ; <ptr> [#uses=1]
+ %4 = load double, ptr %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
- store double %5, double* %7, align 8
+ %7 = getelementptr double, ptr %x, i64 %6 ; <ptr> [#uses=1]
+ store double %5, ptr %7, align 8
%8 = add i64 %i.0.reg2mem.0, -1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
br i1 %9, label %return, label %bb1
diff --git a/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll b/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll
index 730ecda49dfaa..7337f7f787a64 100644
--- a/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll
@@ -9,7 +9,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
-@table = common global [32 x [256 x i32]] zeroinitializer, align 32 ; <[32 x [256 x i32]]*> [#uses=2]
+@table = common global [32 x [256 x i32]] zeroinitializer, align 32 ; <ptr> [#uses=2]
define i32 @main() nounwind {
entry:
@@ -32,8 +32,8 @@ bb1: ; preds = %bb2, %bb.nph
%tmp4 = mul i32 %tmp3, %i.02 ; <i32> [#uses=1]
%tmp5 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
%tmp6 = sext i32 %j.01 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr [32 x [256 x i32]], [32 x [256 x i32]]* @table, i64 0, i64 %tmp5, i64 %tmp6 ; <i32*> [#uses=1]
- store i32 %tmp4, i32* %tmp7, align 4
+ %tmp7 = getelementptr [32 x [256 x i32]], ptr @table, i64 0, i64 %tmp5, i64 %tmp6 ; <ptr> [#uses=1]
+ store i32 %tmp4, ptr %tmp7, align 4
%tmp8 = add i32 %j.01, 1 ; <i32> [#uses=2]
br label %bb2
@@ -56,7 +56,7 @@ bb4.bb5_crit_edge: ; preds = %bb4
br label %bb5
bb5: ; preds = %bb4.bb5_crit_edge, %entry
- %tmp12 = load i32, i32* getelementptr ([32 x [256 x i32]], [32 x [256 x i32]]* @table, i64 0, i64 9, i64 132), align 16 ; <i32> [#uses=1]
+ %tmp12 = load i32, ptr getelementptr ([32 x [256 x i32]], ptr @table, i64 0, i64 9, i64 132), align 16 ; <i32> [#uses=1]
%tmp13 = icmp eq i32 %tmp12, -1116 ; <i1> [#uses=1]
br i1 %tmp13, label %bb7, label %bb6
diff --git a/llvm/test/Analysis/ScalarEvolution/sext-zero.ll b/llvm/test/Analysis/ScalarEvolution/sext-zero.ll
index 434766a961866..53beadb2e8eb5 100644
--- a/llvm/test/Analysis/ScalarEvolution/sext-zero.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sext-zero.ll
@@ -5,7 +5,7 @@
; CHECK-NEXT: %tmp10 = ashr exact i64 %tmp9, 0
; CHECK-NEXT: --> {{.*}} Exits: (-8589934592 + (8589934592 * (zext i32 %arg2 to i64)))
-define void @foo(i32* nocapture %arg, i32 %arg1, i32 %arg2) {
+define void @foo(ptr nocapture %arg, i32 %arg1, i32 %arg2) {
bb:
%tmp = icmp sgt i32 %arg2, 0
br i1 %tmp, label %bb3, label %bb6
@@ -24,15 +24,15 @@ bb7: ; preds = %bb7, %bb3
%tmp8 = phi i64 [ %tmp18, %bb7 ], [ 0, %bb3 ]
%tmp9 = shl i64 %tmp8, 33
%tmp10 = ashr exact i64 %tmp9, 0
- %tmp11 = getelementptr inbounds i32, i32* %arg, i64 %tmp10
- %tmp12 = load i32, i32* %tmp11, align 4
+ %tmp11 = getelementptr inbounds i32, ptr %arg, i64 %tmp10
+ %tmp12 = load i32, ptr %tmp11, align 4
%tmp13 = sub nsw i32 %tmp12, %arg1
- store i32 %tmp13, i32* %tmp11, align 4
+ store i32 %tmp13, ptr %tmp11, align 4
%tmp14 = or i64 %tmp10, 1
- %tmp15 = getelementptr inbounds i32, i32* %arg, i64 %tmp14
- %tmp16 = load i32, i32* %tmp15, align 4
+ %tmp15 = getelementptr inbounds i32, ptr %arg, i64 %tmp14
+ %tmp16 = load i32, ptr %tmp15, align 4
%tmp17 = mul nsw i32 %tmp16, %arg1
- store i32 %tmp17, i32* %tmp15, align 4
+ store i32 %tmp17, ptr %tmp15, align 4
%tmp18 = add nuw nsw i64 %tmp8, 1
%tmp19 = icmp eq i64 %tmp18, %tmp4
br i1 %tmp19, label %bb5, label %bb7
diff --git a/llvm/test/Analysis/ScalarEvolution/shift-op.ll b/llvm/test/Analysis/ScalarEvolution/shift-op.ll
index e35c2c98b7a24..c4099ac7b8dc2 100644
--- a/llvm/test/Analysis/ScalarEvolution/shift-op.ll
+++ b/llvm/test/Analysis/ScalarEvolution/shift-op.ll
@@ -51,11 +51,11 @@ define void @test2(i32 %init) {
ret void
}
-define void @test3(i32* %init.ptr) {
+define void @test3(ptr %init.ptr) {
; CHECK-LABEL: Determining loop execution counts for: @test3
; CHECK: Loop %loop: constant max backedge-taken count is 32
entry:
- %init = load i32, i32* %init.ptr, !range !0
+ %init = load i32, ptr %init.ptr, !range !0
br label %loop
loop:
@@ -68,11 +68,11 @@ define void @test3(i32* %init.ptr) {
ret void
}
-define void @test4(i32* %init.ptr) {
+define void @test4(ptr %init.ptr) {
; CHECK-LABEL: Classifying expressions for: @test4
; CHECK-LABEL: Loop %loop: constant max backedge-taken count is 32
entry:
- %init = load i32, i32* %init.ptr, !range !1
+ %init = load i32, ptr %init.ptr, !range !1
br label %loop
loop:
@@ -85,13 +85,13 @@ define void @test4(i32* %init.ptr) {
ret void
}
-define void @test5(i32* %init.ptr) {
+define void @test5(ptr %init.ptr) {
; CHECK-LABEL: Determining loop execution counts for: @test5
; CHECK: Loop %loop: Unpredictable constant max backedge-taken count.
; %iv will "stabilize" to -1, so this is an infinite loop
entry:
- %init = load i32, i32* %init.ptr, !range !1
+ %init = load i32, ptr %init.ptr, !range !1
br label %loop
loop:
diff --git a/llvm/test/Analysis/ScalarEvolution/sle.ll b/llvm/test/Analysis/ScalarEvolution/sle.ll
index a6b1aa7d2286a..e36333cf85b6f 100644
--- a/llvm/test/Analysis/ScalarEvolution/sle.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sle.ll
@@ -7,17 +7,17 @@
; CHECK: Loop %for.body: backedge-taken count is %n
; CHECK: Loop %for.body: constant max backedge-taken count is 9223372036854775807
-define void @le(i64 %n, double* nocapture %p) nounwind {
+define void @le(i64 %n, ptr nocapture %p) nounwind {
entry:
%cmp6 = icmp slt i64 %n, 0 ; <i1> [#uses=1]
br i1 %cmp6, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%i = phi i64 [ %i.next, %for.body ], [ 0, %entry ] ; <i64> [#uses=2]
- %arrayidx = getelementptr double, double* %p, i64 %i ; <double*> [#uses=2]
- %t4 = load double, double* %arrayidx ; <double> [#uses=1]
+ %arrayidx = getelementptr double, ptr %p, i64 %i ; <ptr> [#uses=2]
+ %t4 = load double, ptr %arrayidx ; <double> [#uses=1]
%mul = fmul double %t4, 2.200000e+00 ; <double> [#uses=1]
- store double %mul, double* %arrayidx
+ store double %mul, ptr %arrayidx
%i.next = add nsw i64 %i, 1 ; <i64> [#uses=2]
%cmp = icmp sgt i64 %i.next, %n ; <i1> [#uses=1]
br i1 %cmp, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/ScalarEvolution/smax-br-phi-idioms.ll b/llvm/test/Analysis/ScalarEvolution/smax-br-phi-idioms.ll
index 15431e33e9b17..6c4e77cffb1c6 100644
--- a/llvm/test/Analysis/ScalarEvolution/smax-br-phi-idioms.ll
+++ b/llvm/test/Analysis/ScalarEvolution/smax-br-phi-idioms.ll
@@ -34,14 +34,14 @@ define i32 @f1(i32 %x, i32 %y) {
ret i32 %v
}
-define i32 @f2(i32 %x, i32 %y, i32* %ptr) {
+define i32 @f2(i32 %x, i32 %y, ptr %ptr) {
; CHECK-LABEL: Classifying expressions for: @f2
entry:
%c = icmp sge i32 %y, 0
br i1 %c, label %add, label %merge
add:
- %lv = load i32, i32* %ptr
+ %lv = load i32, ptr %ptr
br label %merge
merge:
@@ -103,7 +103,7 @@ define i32 @f4(i32 %x, i32 %init, i32 %lim) {
ret i32 %v
}
-define i32 @f5(i32* %val) {
+define i32 @f5(ptr %val) {
; CHECK-LABEL: Classifying expressions for: @f5
entry:
br label %for.end
@@ -112,7 +112,7 @@ for.condt:
br i1 true, label %for.cond.0, label %for.end
for.end:
- %inc = load i32, i32* %val
+ %inc = load i32, ptr %val
br i1 false, label %for.condt, label %for.cond.0
for.cond.0:
diff --git a/llvm/test/Analysis/ScalarEvolution/solve-quadratic-i1.ll b/llvm/test/Analysis/ScalarEvolution/solve-quadratic-i1.ll
index a21ed0bfdb219..a0cdb00c9b3aa 100644
--- a/llvm/test/Analysis/ScalarEvolution/solve-quadratic-i1.ll
+++ b/llvm/test/Analysis/ScalarEvolution/solve-quadratic-i1.ll
@@ -42,7 +42,7 @@ b2: ; preds = %b1
@g0 = common dso_local global i16 0, align 2
@g1 = common dso_local global i32 0, align 4
-@g2 = common dso_local global i32* null, align 8
+@g2 = common dso_local global ptr null, align 8
define void @f1() #0 {
; CHECK-LABEL: 'f1'
@@ -76,8 +76,8 @@ define void @f1() #0 {
; CHECK: Loop %b1: Trip multiple is 3
;
b0:
- store i16 0, i16* @g0, align 2
- store i32* @g1, i32** @g2, align 8
+ store i16 0, ptr @g0, align 2
+ store ptr @g1, ptr @g2, align 8
br label %b1
b1: ; preds = %b1, %b0
@@ -93,8 +93,8 @@ b1: ; preds = %b1, %b0
b2: ; preds = %b1
%v7 = phi i32 [ %v1, %b1 ]
%v8 = phi i16 [ %v3, %b1 ]
- store i32 %v7, i32* @g1, align 4
- store i16 %v8, i16* @g0, align 2
+ store i32 %v7, ptr @g1, align 4
+ store i16 %v8, ptr @g0, align 2
br label %b3
b3: ; preds = %b3, %b2
diff --git a/llvm/test/Analysis/ScalarEvolution/solve-quadratic-overflow.ll b/llvm/test/Analysis/ScalarEvolution/solve-quadratic-overflow.ll
index 9af15ab823a0e..1537c49040d41 100644
--- a/llvm/test/Analysis/ScalarEvolution/solve-quadratic-overflow.ll
+++ b/llvm/test/Analysis/ScalarEvolution/solve-quadratic-overflow.ll
@@ -44,8 +44,8 @@ b2: ; preds = %b1
%v5 = phi i16 [ %v2, %b1 ]
%v6 = phi i16 [ %v3, %b1 ]
%v7 = sext i16 %v5 to i32
- store i32 %v7, i32* @g0, align 4
- store i16 %v6, i16* @g1, align 2
+ store i32 %v7, ptr @g0, align 4
+ store i16 %v6, ptr @g1, align 2
ret i32 0
}
diff --git a/llvm/test/Analysis/ScalarEvolution/strip-injective-zext.ll b/llvm/test/Analysis/ScalarEvolution/strip-injective-zext.ll
index 285fb610d4012..55485d4865d4e 100644
--- a/llvm/test/Analysis/ScalarEvolution/strip-injective-zext.ll
+++ b/llvm/test/Analysis/ScalarEvolution/strip-injective-zext.ll
@@ -13,7 +13,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
-define i32 @f0(i32 %a0, i32 %a1, i32* nocapture %a2) #0 {
+define i32 @f0(i32 %a0, i32 %a1, ptr nocapture %a2) #0 {
b0:
%v0 = and i32 %a1, 3
%v1 = icmp eq i32 %v0, 0
@@ -26,10 +26,9 @@ b1: ; preds = %b0
b2: ; preds = %b2, %b1
%v4 = phi i32 [ %a1, %b1 ], [ %v9, %b2 ]
- %v5 = phi i32* [ %a2, %b1 ], [ %v8, %b2 ]
- %v6 = getelementptr inbounds i32, i32* %v5, i32 0
- store i32 %v3, i32* %v6, align 4
- %v8 = getelementptr inbounds i32, i32* %v5, i32 1
+ %v5 = phi ptr [ %a2, %b1 ], [ %v8, %b2 ]
+ store i32 %v3, ptr %v5, align 4
+ %v8 = getelementptr inbounds i32, ptr %v5, i32 1
%v9 = add nsw i32 %v4, 1
%v10 = and i32 %v9, 3
%v11 = icmp eq i32 %v10, 0
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll b/llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll
index dfbcd98ff4652..98423bf246e88 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count-implied-addrec.ll
@@ -117,7 +117,7 @@ entry:
for.body: ; preds = %entry, %for.body
%iv = phi i8 [ %iv.next, %for.body ], [ 0, %entry ]
- store volatile i8 %iv, i8* @G
+ store volatile i8 %iv, ptr @G
%iv.next = add i8 %iv, 1
%zext = zext i8 %iv to i16
%cmp = icmp ult i16 %zext, 257
@@ -145,7 +145,7 @@ entry:
for.body: ; preds = %entry, %for.body
%iv = phi i8 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i8 %iv, 1
- store i8 %iv, i8* @G
+ store i8 %iv, ptr @G
%zext = zext i8 %iv.next to i16
%cmp = icmp ult i16 %zext, %n
br i1 %cmp, label %for.body, label %for.end
@@ -170,7 +170,7 @@ entry:
for.body: ; preds = %entry, %for.body
%iv = phi i8 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i8 %iv, 3
- store i8 %iv, i8* @G
+ store i8 %iv, ptr @G
%zext = zext i8 %iv.next to i16
%cmp = icmp ult i16 %zext, %n
br i1 %cmp, label %for.body, label %for.end
@@ -197,7 +197,7 @@ entry:
for.body: ; preds = %entry, %for.body
%iv = phi i8 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i8 %iv, %step
- store i8 %iv, i8* @G
+ store i8 %iv, ptr @G
%zext = zext i8 %iv.next to i16
%cmp = icmp ult i16 %zext, %n
br i1 %cmp, label %for.body, label %for.end
@@ -222,7 +222,7 @@ entry:
for.body: ; preds = %entry, %for.body
%iv = phi i8 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i8 %iv, %step
- store i8 %iv, i8* @G
+ store i8 %iv, ptr @G
%zext = zext i8 %iv.next to i16
%cmp = icmp ult i16 %zext, %n
br i1 %cmp, label %for.body, label %for.end
@@ -249,7 +249,7 @@ entry:
for.body: ; preds = %entry, %for.body
%iv = phi i8 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i8 %iv, 2
- store i8 %iv, i8* @G
+ store i8 %iv, ptr @G
%zext = zext i8 %iv.next to i16
%cmp = icmp ult i16 %zext, %n
br i1 %cmp, label %for.body, label %for.end
@@ -276,7 +276,7 @@ entry:
for.body: ; preds = %entry, %for.body
%iv = phi i8 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i8 %iv, 1
- store i8 %iv, i8* @G
+ store i8 %iv, ptr @G
%zext = zext i8 %iv.next to i16
%cmp = icmp ult i16 %zext, %n
br i1 %cmp, label %for.body, label %for.end
@@ -304,7 +304,7 @@ entry:
for.body: ; preds = %entry, %for.body
%iv = phi i8 [ %iv.next, %for.body ], [ 0, %entry ]
%iv.next = add i8 %iv, 1
- store i8 %iv, i8* @G
+ store i8 %iv, ptr @G
%zext = zext i8 %iv.next to i16
%cmp = icmp ult i16 %zext, %n
br i1 %cmp, label %for.body, label %for.end
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count-negative-stride.ll b/llvm/test/Analysis/ScalarEvolution/trip-count-negative-stride.ll
index 5a0f7f2a74b76..21f3a96914b29 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count-negative-stride.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count-negative-stride.ll
@@ -253,7 +253,7 @@ for.end: ; preds = %for.body, %entry
ret void
}
-define void @ult_129_varying_rhs(i8* %n_p) {
+define void @ult_129_varying_rhs(ptr %n_p) {
; CHECK-LABEL: 'ult_129_varying_rhs'
; CHECK-NEXT: Determining loop execution counts for: @ult_129_varying_rhs
; CHECK-NEXT: Loop %for.body: Unpredictable backedge-taken count.
@@ -267,7 +267,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i.05 = phi i8 [ %add, %for.body ], [ 0, %entry ]
%add = add nuw i8 %i.05, 129
- %n = load i8, i8* %n_p
+ %n = load i8, ptr %n_p
%cmp = icmp ult i8 %add, %n
br i1 %cmp, label %for.body, label %for.end
@@ -275,7 +275,7 @@ for.end: ; preds = %for.body, %entry
ret void
}
-define void @ult_symbolic_varying_rhs(i8* %n_p, i8 %step) {
+define void @ult_symbolic_varying_rhs(ptr %n_p, i8 %step) {
; CHECK-LABEL: 'ult_symbolic_varying_rhs'
; CHECK-NEXT: Determining loop execution counts for: @ult_symbolic_varying_rhs
; CHECK-NEXT: Loop %for.body: Unpredictable backedge-taken count.
@@ -291,7 +291,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i.05 = phi i8 [ %add, %for.body ], [ 0, %entry ]
%add = add nuw i8 %i.05, %step
- %n = load i8, i8* %n_p
+ %n = load i8, ptr %n_p
%cmp = icmp ult i8 %add, %n
br i1 %cmp, label %for.body, label %for.end
@@ -549,7 +549,7 @@ for.end: ; preds = %for.body, %entry
ret void
}
-define void @slt_129_varying_rhs(i8* %n_p) {
+define void @slt_129_varying_rhs(ptr %n_p) {
; CHECK-LABEL: 'slt_129_varying_rhs'
; CHECK-NEXT: Determining loop execution counts for: @slt_129_varying_rhs
; CHECK-NEXT: Loop %for.body: Unpredictable backedge-taken count.
@@ -563,7 +563,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i.05 = phi i8 [ %add, %for.body ], [ -128, %entry ]
%add = add nsw i8 %i.05, 129
- %n = load i8, i8* %n_p
+ %n = load i8, ptr %n_p
%cmp = icmp slt i8 %add, %n
br i1 %cmp, label %for.body, label %for.end
@@ -571,7 +571,7 @@ for.end: ; preds = %for.body, %entry
ret void
}
-define void @slt_symbolic_varying_rhs(i8* %n_p, i8 %step) {
+define void @slt_symbolic_varying_rhs(ptr %n_p, i8 %step) {
; CHECK-LABEL: 'slt_symbolic_varying_rhs'
; CHECK-NEXT: Determining loop execution counts for: @slt_symbolic_varying_rhs
; CHECK-NEXT: Loop %for.body: Unpredictable backedge-taken count.
@@ -587,7 +587,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i.05 = phi i8 [ %add, %for.body ], [ -128, %entry ]
%add = add nsw i8 %i.05, %step
- %n = load i8, i8* %n_p
+ %n = load i8, ptr %n_p
%cmp = icmp slt i8 %add, %n
br i1 %cmp, label %for.body, label %for.end
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count-unknown-stride.ll b/llvm/test/Analysis/ScalarEvolution/trip-count-unknown-stride.ll
index 530e786f59595..fb559ad2f4e42 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count-unknown-stride.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count-unknown-stride.ll
@@ -12,17 +12,17 @@
target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
-define void @foo1(i32* nocapture %A, i32 %n, i32 %s) mustprogress {
+define void @foo1(ptr nocapture %A, i32 %n, i32 %s) mustprogress {
entry:
%cmp4 = icmp sgt i32 %n, 0
br i1 %cmp4, label %for.body, label %for.end
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, %s
%cmp = icmp slt i32 %add, %n
br i1 %cmp, label %for.body, label %for.end
@@ -40,16 +40,16 @@ for.end: ; preds = %for.body, %entry
; loops with unknown stride.
; CHECK: constant max backedge-taken count is -1
-define void @foo2(i32* nocapture %A, i32 %n, i32 %s) mustprogress {
+define void @foo2(ptr nocapture %A, i32 %n, i32 %s) mustprogress {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, %s
%cmp = icmp slt i32 %add, %n
br i1 %cmp, label %for.body, label %for.end
@@ -64,16 +64,16 @@ for.end: ; preds = %for.body, %entry
; CHECK: Loop %for.body: Unpredictable backedge-taken count.
; CHECK: Loop %for.body: Unpredictable constant max backedge-taken count.
-define void @foo3(i32* nocapture %A, i32 %n, i32 %s) {
+define void @foo3(ptr nocapture %A, i32 %n, i32 %s) {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, %s
%cmp = icmp slt i32 %add, %n
br i1 %cmp, label %for.body, label %for.end
@@ -87,16 +87,16 @@ for.end: ; preds = %for.body, %entry
; CHECK: backedge-taken count is ((((-1 * (1 umin ((-1 * %s) + (%n smax %s))))<nuw><nsw> + (-1 * %s) + (%n smax %s)) /u (1 umax %s)) + (1 umin ((-1 * %s) + (%n smax %s))))
; CHECK: constant max backedge-taken count is -1
-define void @foo4(i32* nocapture %A, i32 %n, i32 %s) {
+define void @foo4(ptr nocapture %A, i32 %n, i32 %s) {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, %s
%cmp = icmp slt i32 %add, %n
br i1 %cmp, label %for.body, label %for.end, !llvm.loop !8
@@ -113,16 +113,16 @@ for.end: ; preds = %for.body, %entry
; loops with unknown stride.
; CHECK: constant max backedge-taken count is -1
-define void @foo5(i32* nocapture %A, i32 %n, i32 %s, i32 %start) mustprogress {
+define void @foo5(ptr nocapture %A, i32 %n, i32 %s, i32 %start) mustprogress {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ %start, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, %s
%cmp = icmp slt i32 %i.05, %n
br i1 %cmp, label %for.body, label %for.end
@@ -138,16 +138,16 @@ for.end: ; preds = %for.body, %entry
; CHECK: Loop %for.body: Unpredictable constant max backedge-taken count.
; CHECK: Loop %for.body: Unpredictable predicated backedge-taken count.
; Note that this function is well defined only when %n <=s 0
-define void @zero_stride(i32* nocapture %A, i32 %n) {
+define void @zero_stride(ptr nocapture %A, i32 %n) {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, 0
%cmp = icmp slt i32 %add, %n
br i1 %cmp, label %for.body, label %for.end, !llvm.loop !8
@@ -162,16 +162,16 @@ for.end: ; preds = %for.body, %entry
; CHECK: Loop %for.body: Unpredictable predicated backedge-taken count.
; Note that this function will always execute undefined behavior and thus
; any value is valid for a backedge taken count.
-define void @zero_stride_ub(i32* nocapture %A) {
+define void @zero_stride_ub(ptr nocapture %A) {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, 0
%cmp = icmp slt i32 %add, 2
br i1 %cmp, label %for.body, label %for.end, !llvm.loop !8
@@ -185,16 +185,16 @@ for.end: ; preds = %for.body, %entry
; CHECK: Loop %for.body: backedge-taken count is ((((-1 * (1 umin ((-1 * %zero) + (%n smax %zero))))<nuw><nsw> + (-1 * %zero) + (%n smax %zero)) /u (1 umax %zero)) + (1 umin ((-1 * %zero) + (%n smax %zero))))
; CHECK: Loop %for.body: constant max backedge-taken count is -1
-define void @zero_stride_symbolic(i32* nocapture %A, i32 %n, i32 %zero) {
+define void @zero_stride_symbolic(ptr nocapture %A, i32 %n, i32 %zero) {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, %zero
%cmp = icmp slt i32 %add, %n
br i1 %cmp, label %for.body, label %for.end, !llvm.loop !8
@@ -208,18 +208,18 @@ for.end: ; preds = %for.body, %entry
; CHECK: Loop %for.body: Unpredictable backedge-taken count.
; CHECK: Loop %for.body: Unpredictable constant max backedge-taken count
-define void @zero_stride_varying_rhs(i32* nocapture %A, i32* %n_p, i32 %zero) {
+define void @zero_stride_varying_rhs(ptr nocapture %A, ptr %n_p, i32 %zero) {
entry:
br label %for.body
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.05
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
+ %0 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %0, 1
- store i32 %inc, i32* %arrayidx, align 4
+ store i32 %inc, ptr %arrayidx, align 4
%add = add nsw i32 %i.05, %zero
- %n = load i32, i32* %n_p
+ %n = load i32, ptr %n_p
%cmp = icmp slt i32 %add, %n
br i1 %cmp, label %for.body, label %for.end, !llvm.loop !8
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count.ll b/llvm/test/Analysis/ScalarEvolution/trip-count.ll
index 5c8dfd6603c58..0e8898f55a7c2 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count.ll
@@ -20,8 +20,8 @@ entry:
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
- store i32 123, i32* %tmp
+ %tmp = getelementptr [1000 x i32], ptr @A, i32 0, i32 %i.0 ; <ptr> [#uses=1]
+ store i32 123, ptr %tmp
%tmp2 = add i32 %i.0, 1 ; <i32> [#uses=1]
br label %bb3
@@ -49,8 +49,7 @@ define i32 @PR22795() {
;
entry:
%bins = alloca [16 x i64], align 16
- %0 = bitcast [16 x i64]* %bins to i8*
- call void @llvm.memset.p0i8.i64(i8* align 16 %0, i8 0, i64 128, i1 false)
+ call void @llvm.memset.p0.i64(ptr align 16 %bins, i8 0, i64 128, i1 false)
br label %preheader
preheader: ; preds = %for.inc.1, %entry
@@ -61,11 +60,11 @@ preheader: ; preds = %for.inc.1, %entry
for.body: ; preds = %preheader
%zext = zext i32 %iv to i64
- %arrayidx = getelementptr [16 x i64], [16 x i64]* %bins, i64 0, i64 %v11
- %loaded = load i64, i64* %arrayidx, align 8
+ %arrayidx = getelementptr [16 x i64], ptr %bins, i64 0, i64 %v11
+ %loaded = load i64, ptr %arrayidx, align 8
%add = add i64 %loaded, 1
%add2 = add i64 %add, %zext
- store i64 %add2, i64* %arrayidx, align 8
+ store i64 %add2, ptr %arrayidx, align 8
br label %for.inc
for.inc: ; preds = %for.body, %preheader
@@ -74,19 +73,19 @@ for.inc: ; preds = %for.body, %preheade
br i1 true, label %for.body.1, label %for.inc.1
end: ; preds = %for.inc.1
- %arrayidx8 = getelementptr [16 x i64], [16 x i64]* %bins, i64 0, i64 2
- %load = load i64, i64* %arrayidx8, align 16
+ %arrayidx8 = getelementptr [16 x i64], ptr %bins, i64 0, i64 2
+ %load = load i64, ptr %arrayidx8, align 16
%shr4 = lshr i64 %load, 32
%conv = trunc i64 %shr4 to i32
ret i32 %conv
for.body.1: ; preds = %for.inc
%zext.1 = zext i32 %next to i64
- %arrayidx.1 = getelementptr [16 x i64], [16 x i64]* %bins, i64 0, i64 %next12
- %loaded.1 = load i64, i64* %arrayidx.1, align 8
+ %arrayidx.1 = getelementptr [16 x i64], ptr %bins, i64 0, i64 %next12
+ %loaded.1 = load i64, ptr %arrayidx.1, align 8
%add.1 = add i64 %loaded.1, 1
%add2.1 = add i64 %add.1, %zext.1
- store i64 %add2.1, i64* %arrayidx.1, align 8
+ store i64 %add2.1, ptr %arrayidx.1, align 8
br label %for.inc.1
for.inc.1: ; preds = %for.body.1, %for.inc
@@ -97,7 +96,7 @@ for.inc.1: ; preds = %for.body.1, %for.in
}
; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #0
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #0
declare void @may_exit() nounwind
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count10.ll b/llvm/test/Analysis/ScalarEvolution/trip-count10.ll
index fc486a33702db..9131094f9cb59 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count10.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count10.ll
@@ -154,7 +154,7 @@ return:
; One side of the expression test against a value that will be skipped.
; We can't assume undefined behavior just because we have an NSW flag.
;
-define void @exit_orcond_nsw(i32 *%a) nounwind {
+define void @exit_orcond_nsw(ptr %a) nounwind {
; CHECK-LABEL: 'exit_orcond_nsw'
; CHECK-NEXT: Determining loop execution counts for: @exit_orcond_nsw
; CHECK-NEXT: Loop %for.body.i: Unpredictable backedge-taken count.
@@ -175,6 +175,6 @@ for.body.i: ; preds = %for.body.i, %entry
exit: ; preds = %for.body.i
%b.01.i.lcssa = phi i32 [ %b.01.i, %for.body.i ]
- store i32 %b.01.i.lcssa, i32* %a, align 4
+ store i32 %b.01.i.lcssa, ptr %a, align 4
ret void
}
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count11.ll b/llvm/test/Analysis/ScalarEvolution/trip-count11.ll
index 94fd9152f86eb..5bba500bb2fca 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count11.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count11.ll
@@ -20,8 +20,8 @@ for.cond: ; preds = %for.inc, %entry
for.inc: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* @foo.a, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [8 x i32], ptr @foo.a, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
@@ -43,8 +43,8 @@ for.cond: ; preds = %for.inc, %entry
for.inc: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds [8 x i32], [8 x i32] addrspace(1)* @foo.a_as1, i64 0, i64 %idxprom
- %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds [8 x i32], ptr addrspace(1) @foo.a_as1, i64 0, i64 %idxprom
+ %0 = load i32, ptr addrspace(1) %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count12.ll b/llvm/test/Analysis/ScalarEvolution/trip-count12.ll
index 356f9c826dee8..6fc804ac229d4 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count12.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count12.ll
@@ -4,7 +4,7 @@
; CHECK: Loop %for.body: backedge-taken count is ((-2 + %len) /u 2)
; CHECK: Loop %for.body: constant max backedge-taken count is 1073741823
-define zeroext i16 @test(i16* nocapture %p, i32 %len) nounwind readonly {
+define zeroext i16 @test(ptr nocapture %p, i32 %len) nounwind readonly {
entry:
%cmp2 = icmp sgt i32 %len, 1
br i1 %cmp2, label %for.body.preheader, label %for.end
@@ -13,11 +13,11 @@ for.body.preheader: ; preds = %entry
br label %for.body
for.body: ; preds = %for.body, %for.body.preheader
- %p.addr.05 = phi i16* [ %incdec.ptr, %for.body ], [ %p, %for.body.preheader ]
+ %p.addr.05 = phi ptr [ %incdec.ptr, %for.body ], [ %p, %for.body.preheader ]
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %for.body.preheader ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
- %incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i32 1
- %0 = load i16, i16* %p.addr.05, align 2
+ %incdec.ptr = getelementptr inbounds i16, ptr %p.addr.05, i32 1
+ %0 = load i16, ptr %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
%sub = add nsw i32 %len.addr.04, -2
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count14.ll b/llvm/test/Analysis/ScalarEvolution/trip-count14.ll
index 37a0e7dda1cad..1e83530314668 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count14.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count14.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt -S -disable-output "-passes=print<scalar-evolution>" -scalar-evolution-classify-expressions=0 < %s 2>&1 | FileCheck %s
-define void @s32_max1(i32 %n, i32* %p) {
+define void @s32_max1(i32 %n, ptr %p) {
; CHECK-LABEL: 's32_max1'
; CHECK-NEXT: Determining loop execution counts for: @s32_max1
; CHECK-NEXT: Loop %do.body: backedge-taken count is ((-1 * %n) + ((1 + %n) smax %n))
@@ -17,8 +17,8 @@ entry:
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp slt i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or 1 times
@@ -27,7 +27,7 @@ do.end:
ret void
}
-define void @s32_max2(i32 %n, i32* %p) {
+define void @s32_max2(i32 %n, ptr %p) {
; CHECK-LABEL: 's32_max2'
; CHECK-NEXT: Determining loop execution counts for: @s32_max2
; CHECK-NEXT: Loop %do.body: backedge-taken count is ((-1 * %n) + ((2 + %n) smax %n))
@@ -43,8 +43,8 @@ entry:
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp slt i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or 2 times
@@ -53,7 +53,7 @@ do.end:
ret void
}
-define void @s32_maxx(i32 %n, i32 %x, i32* %p) {
+define void @s32_maxx(i32 %n, i32 %x, ptr %p) {
; CHECK-LABEL: 's32_maxx'
; CHECK-NEXT: Determining loop execution counts for: @s32_maxx
; CHECK-NEXT: Loop %do.body: backedge-taken count is ((-1 * %n) + ((%n + %x) smax %n))
@@ -69,8 +69,8 @@ entry:
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp slt i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or x times
@@ -79,7 +79,7 @@ do.end:
ret void
}
-define void @s32_max2_unpredictable_exit(i32 %n, i32 %x, i32* %p) {
+define void @s32_max2_unpredictable_exit(i32 %n, i32 %x, ptr %p) {
; CHECK-LABEL: 's32_max2_unpredictable_exit'
; CHECK-NEXT: Determining loop execution counts for: @s32_max2_unpredictable_exit
; CHECK-NEXT: Loop %do.body: <multiple exits> backedge-taken count is (((-1 * %n) + ((2 + %n) smax %n)) umin ((-1 * %n) + %x))
@@ -103,8 +103,8 @@ do.body:
br i1 %cmp, label %do.end, label %if.end ; unpredictable
if.end:
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp1 = icmp slt i32 %i.0, %add
br i1 %cmp1, label %do.body, label %do.end ; taken either 0 or 2 times
@@ -113,7 +113,7 @@ do.end:
ret void
}
-define void @u32_max1(i32 %n, i32* %p) {
+define void @u32_max1(i32 %n, ptr %p) {
; CHECK-LABEL: 'u32_max1'
; CHECK-NEXT: Determining loop execution counts for: @u32_max1
; CHECK-NEXT: Loop %do.body: backedge-taken count is ((-1 * %n) + ((1 + %n) umax %n))
@@ -129,8 +129,8 @@ entry:
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp ult i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or 1 times
@@ -139,7 +139,7 @@ do.end:
ret void
}
-define void @u32_max2(i32 %n, i32* %p) {
+define void @u32_max2(i32 %n, ptr %p) {
; CHECK-LABEL: 'u32_max2'
; CHECK-NEXT: Determining loop execution counts for: @u32_max2
; CHECK-NEXT: Loop %do.body: backedge-taken count is ((-1 * %n) + ((2 + %n) umax %n))
@@ -155,8 +155,8 @@ entry:
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp ult i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or 2 times
@@ -165,7 +165,7 @@ do.end:
ret void
}
-define void @u32_maxx(i32 %n, i32 %x, i32* %p) {
+define void @u32_maxx(i32 %n, i32 %x, ptr %p) {
; CHECK-LABEL: 'u32_maxx'
; CHECK-NEXT: Determining loop execution counts for: @u32_maxx
; CHECK-NEXT: Loop %do.body: backedge-taken count is ((-1 * %n) + ((%n + %x) umax %n))
@@ -181,8 +181,8 @@ entry:
do.body:
%i.0 = phi i32 [ %n, %entry ], [ %inc, %do.body ]
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp = icmp ult i32 %i.0, %add
br i1 %cmp, label %do.body, label %do.end ; taken either 0 or x times
@@ -191,7 +191,7 @@ do.end:
ret void
}
-define void @u32_max2_unpredictable_exit(i32 %n, i32 %x, i32* %p) {
+define void @u32_max2_unpredictable_exit(i32 %n, i32 %x, ptr %p) {
; CHECK-LABEL: 'u32_max2_unpredictable_exit'
; CHECK-NEXT: Determining loop execution counts for: @u32_max2_unpredictable_exit
; CHECK-NEXT: Loop %do.body: <multiple exits> backedge-taken count is (((-1 * %n) + ((2 + %n) umax %n)) umin ((-1 * %n) + %x))
@@ -215,8 +215,8 @@ do.body:
br i1 %cmp, label %do.end, label %if.end ; unpredictable
if.end:
- %arrayidx = getelementptr i32, i32* %p, i32 %i.0
- store i32 %i.0, i32* %arrayidx, align 4
+ %arrayidx = getelementptr i32, ptr %p, i32 %i.0
+ store i32 %i.0, ptr %arrayidx, align 4
%inc = add i32 %i.0, 1
%cmp1 = icmp ult i32 %i.0, %add
br i1 %cmp1, label %do.body, label %do.end ; taken either 0 or 2 times
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count2.ll b/llvm/test/Analysis/ScalarEvolution/trip-count2.ll
index df6ab7cbcfdb2..a71c9020d3fbe 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count2.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count2.ll
@@ -17,8 +17,8 @@ entry:
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
- store i32 123, i32* %tmp
+ %tmp = getelementptr [1000 x i32], ptr @A, i32 0, i32 %i.0 ; <ptr> [#uses=1]
+ store i32 123, ptr %tmp
%tmp4 = mul i32 %i.0, 4 ; <i32> [#uses=1]
%tmp5 = or i32 %tmp4, 1
%tmp61 = xor i32 %tmp5, -2147483648
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count3.ll b/llvm/test/Analysis/ScalarEvolution/trip-count3.ll
index c0a29d88575e4..e48bd5b9e62eb 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count3.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count3.ll
@@ -5,38 +5,38 @@
; dividing by the stride will have a remainder. This could theoretically
; be teaching it how to use a more elaborate trip count computation.
-%struct.FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct.FILE*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
+%struct.FILE = type { i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, i64, i16, i8, [1 x i8], ptr, i64, ptr, ptr, ptr, ptr, i64, i32, [20 x i8] }
%struct.SHA_INFO = type { [5 x i32], i32, i32, [16 x i32] }
-%struct._IO_marker = type { %struct._IO_marker*, %struct.FILE*, i32 }
+%struct._IO_marker = type { ptr, ptr, i32 }
@_2E_str = external constant [26 x i8]
-@stdin = external global %struct.FILE*
+@stdin = external global ptr
@_2E_str1 = external constant [3 x i8]
@_2E_str12 = external constant [30 x i8]
-declare void @sha_init(%struct.SHA_INFO* nocapture) nounwind
+declare void @sha_init(ptr nocapture) nounwind
-declare fastcc void @sha_transform(%struct.SHA_INFO* nocapture) nounwind
+declare fastcc void @sha_transform(ptr nocapture) nounwind
-declare void @sha_print(%struct.SHA_INFO* nocapture) nounwind
+declare void @sha_print(ptr nocapture) nounwind
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
-declare void @sha_final(%struct.SHA_INFO* nocapture) nounwind
+declare void @sha_final(ptr nocapture) nounwind
-declare void @sha_update(%struct.SHA_INFO* nocapture, i8* nocapture, i32) nounwind
+declare void @sha_update(ptr nocapture, ptr nocapture, i32) nounwind
-declare i64 @fread(i8* noalias nocapture, i64, i64, %struct.FILE* noalias nocapture) nounwind
+declare i64 @fread(ptr noalias nocapture, i64, i64, ptr noalias nocapture) nounwind
-declare i32 @main(i32, i8** nocapture) nounwind
+declare i32 @main(i32, ptr nocapture) nounwind
-declare noalias %struct.FILE* @fopen(i8* noalias nocapture, i8* noalias nocapture) nounwind
+declare noalias ptr @fopen(ptr noalias nocapture, ptr noalias nocapture) nounwind
-declare i32 @fclose(%struct.FILE* nocapture) nounwind
+declare i32 @fclose(ptr nocapture) nounwind
-declare void @sha_stream(%struct.SHA_INFO* nocapture, %struct.FILE* nocapture) nounwind
+declare void @sha_stream(ptr nocapture, ptr nocapture) nounwind
-define void @sha_stream_bb3_2E_i(%struct.SHA_INFO* %sha_info, i8* %data1, i32, i8** %buffer_addr.0.i.out, i32* %count_addr.0.i.out) nounwind {
+define void @sha_stream_bb3_2E_i(ptr %sha_info, ptr %data1, i32, ptr %buffer_addr.0.i.out, ptr %count_addr.0.i.out) nounwind {
; CHECK-LABEL: 'sha_stream_bb3_2E_i'
; CHECK-NEXT: Determining loop execution counts for: @sha_stream_bb3_2E_i
; CHECK-NEXT: Loop %bb3.i: backedge-taken count is ((63 + (-1 * (63 smin %0)) + %0) /u 64)
@@ -50,38 +50,36 @@ newFuncRoot:
br label %bb3.i
sha_update.exit.exitStub: ; preds = %bb3.i
- store i8* %buffer_addr.0.i, i8** %buffer_addr.0.i.out
- store i32 %count_addr.0.i, i32* %count_addr.0.i.out
+ store ptr %buffer_addr.0.i, ptr %buffer_addr.0.i.out
+ store i32 %count_addr.0.i, ptr %count_addr.0.i.out
ret void
bb2.i: ; preds = %bb3.i
- %1 = getelementptr %struct.SHA_INFO, %struct.SHA_INFO* %sha_info, i64 0, i32 3
- %2 = bitcast [16 x i32]* %1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %buffer_addr.0.i, i64 64, i1 false)
- %3 = getelementptr %struct.SHA_INFO, %struct.SHA_INFO* %sha_info, i64 0, i32 3, i64 0
- %4 = bitcast i32* %3 to i8*
+ %1 = getelementptr %struct.SHA_INFO, ptr %sha_info, i64 0, i32 3
+ call void @llvm.memcpy.p0.p0.i64(ptr %1, ptr %buffer_addr.0.i, i64 64, i1 false)
+ %2 = getelementptr %struct.SHA_INFO, ptr %sha_info, i64 0, i32 3, i64 0
br label %codeRepl
codeRepl: ; preds = %bb2.i
- call void @sha_stream_bb3_2E_i_bb1_2E_i_2E_i(i8* %4)
+ call void @sha_stream_bb3_2E_i_bb1_2E_i_2E_i(ptr %2)
br label %byte_reverse.exit.i
byte_reverse.exit.i: ; preds = %codeRepl
- call fastcc void @sha_transform(%struct.SHA_INFO* %sha_info) nounwind
- %5 = getelementptr i8, i8* %buffer_addr.0.i, i64 64
- %6 = add i32 %count_addr.0.i, -64
+ call fastcc void @sha_transform(ptr %sha_info) nounwind
+ %3 = getelementptr i8, ptr %buffer_addr.0.i, i64 64
+ %4 = add i32 %count_addr.0.i, -64
br label %bb3.i
bb3.i: ; preds = %byte_reverse.exit.i, %newFuncRoot
- %buffer_addr.0.i = phi i8* [ %data1, %newFuncRoot ], [ %5, %byte_reverse.exit.i ]
- %count_addr.0.i = phi i32 [ %0, %newFuncRoot ], [ %6, %byte_reverse.exit.i ]
- %7 = icmp sgt i32 %count_addr.0.i, 63
- br i1 %7, label %bb2.i, label %sha_update.exit.exitStub
+ %buffer_addr.0.i = phi ptr [ %data1, %newFuncRoot ], [ %3, %byte_reverse.exit.i ]
+ %count_addr.0.i = phi i32 [ %0, %newFuncRoot ], [ %4, %byte_reverse.exit.i ]
+ %5 = icmp sgt i32 %count_addr.0.i, 63
+ br i1 %5, label %bb2.i, label %sha_update.exit.exitStub
}
-declare void @sha_stream_bb3_2E_i_bb1_2E_i_2E_i(i8*) nounwind
+declare void @sha_stream_bb3_2E_i_bb1_2E_i_2E_i(ptr) nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count4.ll b/llvm/test/Analysis/ScalarEvolution/trip-count4.ll
index 634ea554a3dc7..29829eba45ef5 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count4.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count4.ll
@@ -3,7 +3,7 @@
; ScalarEvolution should be able to compute a loop exit value for %indvar.i8.
-define void @another_count_down_signed(double* %d, i64 %n) nounwind {
+define void @another_count_down_signed(ptr %d, i64 %n) nounwind {
; CHECK-LABEL: 'another_count_down_signed'
; CHECK-NEXT: Determining loop execution counts for: @another_count_down_signed
; CHECK-NEXT: Loop %loop: backedge-taken count is (-11 + %n)
@@ -20,10 +20,10 @@ loop: ; preds = %loop, %entry
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ] ; <i64> [#uses=4]
%s0 = shl i64 %indvar, 8 ; <i64> [#uses=1]
%indvar.i8 = ashr i64 %s0, 8 ; <i64> [#uses=1]
- %t0 = getelementptr double, double* %d, i64 %indvar.i8 ; <double*> [#uses=2]
- %t1 = load double, double* %t0 ; <double> [#uses=1]
+ %t0 = getelementptr double, ptr %d, i64 %indvar.i8 ; <ptr> [#uses=2]
+ %t1 = load double, ptr %t0 ; <double> [#uses=1]
%t2 = fmul double %t1, 1.000000e-01 ; <double> [#uses=1]
- store double %t2, double* %t0
+ store double %t2, ptr %t0
%indvar.next = sub i64 %indvar, 1 ; <i64> [#uses=2]
%exitcond = icmp eq i64 %indvar.next, 10 ; <i1> [#uses=1]
br i1 %exitcond, label %return, label %loop
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count5.ll b/llvm/test/Analysis/ScalarEvolution/trip-count5.ll
index 95bb126d4c2ad..a55adea372866 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count5.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count5.ll
@@ -5,10 +5,10 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-define float @t(float* %pTmp1, float* %peakWeight, float* %nrgReducePeakrate, i32 %bim) nounwind {
+define float @t(ptr %pTmp1, ptr %peakWeight, ptr %nrgReducePeakrate, i32 %bim) nounwind {
; CHECK-LABEL: Classifying expressions for: @t
entry:
- %tmp3 = load float, float* %peakWeight, align 4
+ %tmp3 = load float, ptr %peakWeight, align 4
%tmp2538 = icmp sgt i32 %bim, 0
br i1 %tmp2538, label %bb.nph, label %bb4
@@ -20,17 +20,17 @@ bb:
%hiPart.035 = phi i32 [ %tmp12, %bb1 ], [ 0, %bb.nph ]
%peakCount.034 = phi float [ %tmp19, %bb1 ], [ %tmp3, %bb.nph ]
%tmp6 = sext i32 %hiPart.035 to i64
- %tmp7 = getelementptr float, float* %pTmp1, i64 %tmp6
+ %tmp7 = getelementptr float, ptr %pTmp1, i64 %tmp6
; CHECK: %tmp6 = sext i32 %hiPart.035 to i64
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb>
- %tmp8 = load float, float* %tmp7, align 4
+ %tmp8 = load float, ptr %tmp7, align 4
%tmp10 = fadd float %tmp8, %distERBhi.036
%tmp12 = add i32 %hiPart.035, 1
%tmp15 = sext i32 %tmp12 to i64
- %tmp16 = getelementptr float, float* %peakWeight, i64 %tmp15
+ %tmp16 = getelementptr float, ptr %peakWeight, i64 %tmp15
; CHECK: %tmp15 = sext i32 %tmp12 to i64
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%bb>
- %tmp17 = load float, float* %tmp16, align 4
+ %tmp17 = load float, ptr %tmp16, align 4
%tmp19 = fadd float %tmp17, %peakCount.034
br label %bb1
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count6.ll b/llvm/test/Analysis/ScalarEvolution/trip-count6.ll
index 12d0e2f317e51..e0ab31b1fd104 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count6.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count6.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -disable-output "-passes=print<scalar-evolution>" -scalar-evolution-classify-expressions=0 2>&1 | FileCheck %s
-@mode_table = global [4 x i32] zeroinitializer ; <[4 x i32]*> [#uses=1]
+@mode_table = global [4 x i32] zeroinitializer ; <ptr> [#uses=1]
define i8 @f() {
; CHECK-LABEL: 'f'
@@ -22,8 +22,8 @@ entry:
bb: ; preds = %bb4, %entry
%mode.0 = phi i8 [ 0, %entry ], [ %indvar.next, %bb4 ] ; <i8> [#uses=4]
zext i8 %mode.0 to i32 ; <i32>:1 [#uses=1]
- getelementptr [4 x i32], [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
- load i32, i32* %2, align 4 ; <i32>:3 [#uses=1]
+ getelementptr [4 x i32], ptr @mode_table, i32 0, i32 %1 ; <ptr>:2 [#uses=1]
+ load i32, ptr %2, align 4 ; <i32>:3 [#uses=1]
icmp eq i32 %3, %0 ; <i1>:4 [#uses=1]
br i1 %4, label %bb1, label %bb2
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count7.ll b/llvm/test/Analysis/ScalarEvolution/trip-count7.ll
index 498b5b6b63e4f..3eba6b3c1740c 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count7.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count7.ll
@@ -5,62 +5,62 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
%struct.complex = type { float, float }
%struct.element = type { i32, i32 }
- %struct.node = type { %struct.node*, %struct.node*, i32 }
-@seed = external global i64 ; <i64*> [#uses=0]
-@_2E_str = external constant [18 x i8], align 1 ; <[18 x i8]*> [#uses=0]
-@_2E_str1 = external constant [4 x i8], align 1 ; <[4 x i8]*> [#uses=0]
-@value = external global float ; <float*> [#uses=0]
-@fixed = external global float ; <float*> [#uses=0]
-@floated = external global float ; <float*> [#uses=0]
-@permarray = external global [11 x i32], align 32 ; <[11 x i32]*> [#uses=0]
-@pctr = external global i32 ; <i32*> [#uses=0]
-@tree = external global %struct.node* ; <%struct.node**> [#uses=0]
-@stack = external global [4 x i32], align 16 ; <[4 x i32]*> [#uses=0]
-@cellspace = external global [19 x %struct.element], align 32 ; <[19 x %struct.element]*> [#uses=0]
-@freelist = external global i32 ; <i32*> [#uses=0]
-@movesdone = external global i32 ; <i32*> [#uses=0]
-@ima = external global [41 x [41 x i32]], align 32 ; <[41 x [41 x i32]]*> [#uses=0]
-@imb = external global [41 x [41 x i32]], align 32 ; <[41 x [41 x i32]]*> [#uses=0]
-@imr = external global [41 x [41 x i32]], align 32 ; <[41 x [41 x i32]]*> [#uses=0]
-@rma = external global [41 x [41 x float]], align 32 ; <[41 x [41 x float]]*> [#uses=0]
-@rmb = external global [41 x [41 x float]], align 32 ; <[41 x [41 x float]]*> [#uses=0]
-@rmr = external global [41 x [41 x float]], align 32 ; <[41 x [41 x float]]*> [#uses=0]
-@piececount = external global [4 x i32], align 16 ; <[4 x i32]*> [#uses=0]
-@class = external global [13 x i32], align 32 ; <[13 x i32]*> [#uses=0]
-@piecemax = external global [13 x i32], align 32 ; <[13 x i32]*> [#uses=0]
-@puzzl = external global [512 x i32], align 32 ; <[512 x i32]*> [#uses=0]
-@p = external global [13 x [512 x i32]], align 32 ; <[13 x [512 x i32]]*> [#uses=0]
-@n = external global i32 ; <i32*> [#uses=0]
-@kount = external global i32 ; <i32*> [#uses=0]
-@sortlist = external global [5001 x i32], align 32 ; <[5001 x i32]*> [#uses=0]
-@biggest = external global i32 ; <i32*> [#uses=0]
-@littlest = external global i32 ; <i32*> [#uses=0]
-@top = external global i32 ; <i32*> [#uses=0]
-@z = external global [257 x %struct.complex], align 32 ; <[257 x %struct.complex]*> [#uses=0]
-@w = external global [257 x %struct.complex], align 32 ; <[257 x %struct.complex]*> [#uses=0]
-@e = external global [130 x %struct.complex], align 32 ; <[130 x %struct.complex]*> [#uses=0]
-@zr = external global float ; <float*> [#uses=0]
-@zi = external global float ; <float*> [#uses=0]
+ %struct.node = type { ptr, ptr, i32 }
+@seed = external global i64 ; <ptr> [#uses=0]
+@_2E_str = external constant [18 x i8], align 1 ; <ptr> [#uses=0]
+@_2E_str1 = external constant [4 x i8], align 1 ; <ptr> [#uses=0]
+@value = external global float ; <ptr> [#uses=0]
+@fixed = external global float ; <ptr> [#uses=0]
+@floated = external global float ; <ptr> [#uses=0]
+@permarray = external global [11 x i32], align 32 ; <ptr> [#uses=0]
+@pctr = external global i32 ; <ptr> [#uses=0]
+@tree = external global ptr ; <ptr> [#uses=0]
+@stack = external global [4 x i32], align 16 ; <ptr> [#uses=0]
+@cellspace = external global [19 x %struct.element], align 32 ; <ptr> [#uses=0]
+@freelist = external global i32 ; <ptr> [#uses=0]
+@movesdone = external global i32 ; <ptr> [#uses=0]
+@ima = external global [41 x [41 x i32]], align 32 ; <ptr> [#uses=0]
+@imb = external global [41 x [41 x i32]], align 32 ; <ptr> [#uses=0]
+@imr = external global [41 x [41 x i32]], align 32 ; <ptr> [#uses=0]
+@rma = external global [41 x [41 x float]], align 32 ; <ptr> [#uses=0]
+@rmb = external global [41 x [41 x float]], align 32 ; <ptr> [#uses=0]
+@rmr = external global [41 x [41 x float]], align 32 ; <ptr> [#uses=0]
+@piececount = external global [4 x i32], align 16 ; <ptr> [#uses=0]
+@class = external global [13 x i32], align 32 ; <ptr> [#uses=0]
+@piecemax = external global [13 x i32], align 32 ; <ptr> [#uses=0]
+@puzzl = external global [512 x i32], align 32 ; <ptr> [#uses=0]
+@p = external global [13 x [512 x i32]], align 32 ; <ptr> [#uses=0]
+@n = external global i32 ; <ptr> [#uses=0]
+@kount = external global i32 ; <ptr> [#uses=0]
+@sortlist = external global [5001 x i32], align 32 ; <ptr> [#uses=0]
+@biggest = external global i32 ; <ptr> [#uses=0]
+@littlest = external global i32 ; <ptr> [#uses=0]
+@top = external global i32 ; <ptr> [#uses=0]
+@z = external global [257 x %struct.complex], align 32 ; <ptr> [#uses=0]
+@w = external global [257 x %struct.complex], align 32 ; <ptr> [#uses=0]
+@e = external global [130 x %struct.complex], align 32 ; <ptr> [#uses=0]
+@zr = external global float ; <ptr> [#uses=0]
+@zi = external global float ; <ptr> [#uses=0]
declare void @Initrand() nounwind
declare i32 @Rand() nounwind
-declare void @Try(i32, i32*, i32*, i32*, i32*, i32*) nounwind
+declare void @Try(i32, ptr, ptr, ptr, ptr, ptr) nounwind
-declare i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(ptr nocapture) nounwind
declare void @Queens(i32) nounwind
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
declare i32 @main() nounwind
declare void @Doit() nounwind
-declare void @Doit_bb7([15 x i32]*, [17 x i32]*, [9 x i32]*) nounwind
+declare void @Doit_bb7(ptr, ptr, ptr) nounwind
-define void @Doit_bb7_2E_i([9 x i32]* %x1, [15 x i32]* %c, [17 x i32]* %b, [9 x i32]* %a, i32* %q, i32* %x1.sub, i32* %b9, i32* %a10, i32* %c11) nounwind {
+define void @Doit_bb7_2E_i(ptr %x1, ptr %c, ptr %b, ptr %a, ptr %q, ptr %x1.sub, ptr %b9, ptr %a10, ptr %c11) nounwind {
; CHECK-LABEL: 'Doit_bb7_2E_i'
; CHECK-NEXT: Determining loop execution counts for: @Doit_bb7_2E_i
; CHECK-NEXT: Loop %bb7.i: Unpredictable backedge-taken count.
@@ -76,60 +76,60 @@ Try.exit.exitStub: ; preds = %bb7.i
bb.i: ; preds = %bb7.i
%tmp = add i32 %j.0.i, 1 ; <i32> [#uses=5]
- store i32 0, i32* %q, align 4
+ store i32 0, ptr %q, align 4
%tmp1 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp2 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp1 ; <i32*> [#uses=1]
- %tmp3 = load i32, i32* %tmp2, align 4 ; <i32> [#uses=1]
+ %tmp2 = getelementptr [9 x i32], ptr %a, i64 0, i64 %tmp1 ; <ptr> [#uses=1]
+ %tmp3 = load i32, ptr %tmp2, align 4 ; <i32> [#uses=1]
%tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
br i1 %tmp4, label %bb.i.bb7.i.backedge_crit_edge, label %bb1.i
bb1.i: ; preds = %bb.i
%tmp5 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp6 = sext i32 %tmp5 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp6 ; <i32*> [#uses=1]
- %tmp8 = load i32, i32* %tmp7, align 4 ; <i32> [#uses=1]
+ %tmp7 = getelementptr [17 x i32], ptr %b, i64 0, i64 %tmp6 ; <ptr> [#uses=1]
+ %tmp8 = load i32, ptr %tmp7, align 4 ; <i32> [#uses=1]
%tmp9 = icmp eq i32 %tmp8, 0 ; <i1> [#uses=1]
br i1 %tmp9, label %bb1.i.bb7.i.backedge_crit_edge, label %bb2.i
bb2.i: ; preds = %bb1.i
%tmp10 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp11 = sext i32 %tmp10 to i64 ; <i64> [#uses=1]
- %tmp12 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp11 ; <i32*> [#uses=1]
- %tmp13 = load i32, i32* %tmp12, align 4 ; <i32> [#uses=1]
+ %tmp12 = getelementptr [15 x i32], ptr %c, i64 0, i64 %tmp11 ; <ptr> [#uses=1]
+ %tmp13 = load i32, ptr %tmp12, align 4 ; <i32> [#uses=1]
%tmp14 = icmp eq i32 %tmp13, 0 ; <i1> [#uses=1]
br i1 %tmp14, label %bb2.i.bb7.i.backedge_crit_edge, label %bb3.i
bb3.i: ; preds = %bb2.i
- %tmp15 = getelementptr [9 x i32], [9 x i32]* %x1, i64 0, i64 1 ; <i32*> [#uses=1]
- store i32 %tmp, i32* %tmp15, align 4
+ %tmp15 = getelementptr [9 x i32], ptr %x1, i64 0, i64 1 ; <ptr> [#uses=1]
+ store i32 %tmp, ptr %tmp15, align 4
%tmp16 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp17 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp16 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp17, align 4
+ %tmp17 = getelementptr [9 x i32], ptr %a, i64 0, i64 %tmp16 ; <ptr> [#uses=1]
+ store i32 0, ptr %tmp17, align 4
%tmp18 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp19 = sext i32 %tmp18 to i64 ; <i64> [#uses=1]
- %tmp20 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp19 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp20, align 4
+ %tmp20 = getelementptr [17 x i32], ptr %b, i64 0, i64 %tmp19 ; <ptr> [#uses=1]
+ store i32 0, ptr %tmp20, align 4
%tmp21 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp22 = sext i32 %tmp21 to i64 ; <i64> [#uses=1]
- %tmp23 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp22 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp23, align 4
- call void @Try(i32 2, i32* %q, i32* %b9, i32* %a10, i32* %c11, i32* %x1.sub) nounwind
- %tmp24 = load i32, i32* %q, align 4 ; <i32> [#uses=1]
+ %tmp23 = getelementptr [15 x i32], ptr %c, i64 0, i64 %tmp22 ; <ptr> [#uses=1]
+ store i32 0, ptr %tmp23, align 4
+ call void @Try(i32 2, ptr %q, ptr %b9, ptr %a10, ptr %c11, ptr %x1.sub) nounwind
+ %tmp24 = load i32, ptr %q, align 4 ; <i32> [#uses=1]
%tmp25 = icmp eq i32 %tmp24, 0 ; <i1> [#uses=1]
br i1 %tmp25, label %bb5.i, label %bb3.i.bb7.i.backedge_crit_edge
bb5.i: ; preds = %bb3.i
%tmp26 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp27 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp26 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp27, align 4
+ %tmp27 = getelementptr [9 x i32], ptr %a, i64 0, i64 %tmp26 ; <ptr> [#uses=1]
+ store i32 1, ptr %tmp27, align 4
%tmp28 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp29 = sext i32 %tmp28 to i64 ; <i64> [#uses=1]
- %tmp30 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp29 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp30, align 4
+ %tmp30 = getelementptr [17 x i32], ptr %b, i64 0, i64 %tmp29 ; <ptr> [#uses=1]
+ store i32 1, ptr %tmp30, align 4
%tmp31 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp32 = sext i32 %tmp31 to i64 ; <i64> [#uses=1]
- %tmp33 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp32 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp33, align 4
+ %tmp33 = getelementptr [15 x i32], ptr %c, i64 0, i64 %tmp32 ; <ptr> [#uses=1]
+ store i32 1, ptr %tmp33, align 4
br label %bb7.i.backedge
bb7.i.backedge: ; preds = %bb3.i.bb7.i.backedge_crit_edge, %bb2.i.bb7.i.backedge_crit_edge, %bb1.i.bb7.i.backedge_crit_edge, %bb.i.bb7.i.backedge_crit_edge, %bb5.i
@@ -137,7 +137,7 @@ bb7.i.backedge: ; preds = %bb3.i.bb7.i.backedge_crit_edge, %bb2.i.bb7.i.backedg
bb7.i: ; preds = %bb7.i.backedge, %newFuncRoot
%j.0.i = phi i32 [ 0, %newFuncRoot ], [ %tmp, %bb7.i.backedge ] ; <i32> [#uses=8]
- %tmp34 = load i32, i32* %q, align 4 ; <i32> [#uses=1]
+ %tmp34 = load i32, ptr %q, align 4 ; <i32> [#uses=1]
%tmp35 = icmp eq i32 %tmp34, 0 ; <i1> [#uses=1]
%tmp36 = icmp ne i32 %j.0.i, 8 ; <i1> [#uses=1]
%tmp37 = and i1 %tmp35, %tmp36 ; <i1> [#uses=1]
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count9.ll b/llvm/test/Analysis/ScalarEvolution/trip-count9.ll
index 232c5a8b6c846..55d299c82dd44 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count9.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count9.ll
@@ -4,7 +4,7 @@
; Every combination of
; - starting at 0, 1, or %x
; - steping by 1 or 2
-; - stopping at %n or %n*2
+; - stopping at %n or ptr2
; - using nsw, or not
; Some of these represent missed opportunities.
diff --git a/llvm/test/Analysis/ScalarEvolution/tripmultiple_calculation.ll b/llvm/test/Analysis/ScalarEvolution/tripmultiple_calculation.ll
index 0e0023daa21fa..46bd7cf275ce6 100644
--- a/llvm/test/Analysis/ScalarEvolution/tripmultiple_calculation.ll
+++ b/llvm/test/Analysis/ScalarEvolution/tripmultiple_calculation.ll
@@ -13,7 +13,7 @@
;
; CHECK: Loop %for.body: Trip multiple is 1
-define i32 @foo(i32 %a, i32 %b, i32* %c) {
+define i32 @foo(i32 %a, i32 %b, ptr %c) {
entry:
%cmp = icmp ult i32 %a, %b
%cond = select i1 %cmp, i32 %a, i32 %b
@@ -32,8 +32,8 @@ for.cond.cleanup: ; preds = %for.cond.cleanup.lo
for.body: ; preds = %for.body.preheader, %for.body
%i.09 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32, i32* %c, i32 %i.09
- store i32 %i.09, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %c, i32 %i.09
+ store i32 %i.09, ptr %arrayidx, align 4
%inc = add nuw i32 %i.09, 1
%cmp1 = icmp ult i32 %inc, %add
br i1 %cmp1, label %for.body, label %for.cond.cleanup.loopexit
@@ -70,13 +70,13 @@ for.cond.cleanup: ; preds = %for.cond.cleanup.lo
for.body: ; preds = %for.body.preheader, %for.body
%i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str2, i32 0, i32 0), i32 %i.05)
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str2, i32 %i.05)
%inc = add nuw i32 %i.05, 1
%cmp = icmp eq i32 %inc, %mul
br i1 %cmp, label %for.cond.cleanup.loopexit, label %for.body
}
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)
; If we couldn't prove no overflow for the multiply expression 24 * n,
diff --git a/llvm/test/Analysis/ScalarEvolution/trivial-phis.ll b/llvm/test/Analysis/ScalarEvolution/trivial-phis.ll
index 45a567bfbe3c2..a2d979dd45271 100644
--- a/llvm/test/Analysis/ScalarEvolution/trivial-phis.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trivial-phis.ll
@@ -4,15 +4,15 @@
; CHECK: %add.lcssa.wide = phi i64 [ %indvars.iv.next, %do.body ]
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%do.body> U: [1,2147483648) S: [1,2147483648)
-define i64 @test1(i32 signext %n, float* %A) {
+define i64 @test1(i32 signext %n, ptr %A) {
entry:
%0 = sext i32 %n to i64
br label %do.body
do.body: ; preds = %do.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %do.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
- store float 1.000000e+00, float* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds float, ptr %A, i64 %indvars.iv
+ store float 1.000000e+00, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%cmp = icmp slt i64 %indvars.iv.next, %0
br i1 %cmp, label %do.body, label %do.end
@@ -26,7 +26,7 @@ do.end: ; preds = %do.body
; CHECK: %tmp24 = phi i64 [ %tmp14, %bb22 ], [ %tmp14, %bb13 ]
; CHECK-NEXT: --> %tmp24 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb13: Variant, %bb8: Variant, %bb17: Invariant, %bb27: Invariant }
-define void @test2(i64 %arg, i32* noalias %arg1) {
+define void @test2(i64 %arg, ptr noalias %arg1) {
bb:
%tmp = icmp slt i64 0, %arg
br i1 %tmp, label %bb7, label %bb48
@@ -74,9 +74,9 @@ bb26: ; preds = %bb23
bb27: ; preds = %bb33, %bb26
%tmp28 = phi i64 [ 0, %bb26 ], [ %tmp34, %bb33 ]
%tmp29 = mul nsw i64 %tmp9, %arg
- %tmp30 = getelementptr inbounds i32, i32* %arg1, i64 %tmp24
- %tmp31 = getelementptr inbounds i32, i32* %tmp30, i64 %tmp29
- %tmp32 = load i32, i32* %tmp31, align 4
+ %tmp30 = getelementptr inbounds i32, ptr %arg1, i64 %tmp24
+ %tmp31 = getelementptr inbounds i32, ptr %tmp30, i64 %tmp29
+ %tmp32 = load i32, ptr %tmp31, align 4
br label %bb33
bb33: ; preds = %bb27
@@ -134,7 +134,7 @@ bb48: ; preds = %bb47, %bb
; CHECK-NEXT: --> {1,+,1}<%bb13> U: [1,9223372036854775807) S: [1,9223372036854775807)
; CHECK-SAME: Exits: (-2 + %arg) LoopDispositions: { %bb13: Computable, %bb8: Variant, %bb17_a: Invariant, %bb27: Invariant }
-define void @test3(i64 %arg, i32* %arg1) {
+define void @test3(i64 %arg, ptr %arg1) {
bb:
%tmp = icmp slt i64 0, %arg
br i1 %tmp, label %bb8, label %bb48
@@ -168,9 +168,9 @@ bb23: ; preds = %bb17, %bb13
bb27: ; preds = %bb23, %bb27
%tmp28 = phi i64 [ %tmp34, %bb27 ], [ 0, %bb23 ]
%tmp29 = mul nsw i64 %tmp9, %arg
- %tmp30 = getelementptr inbounds i32, i32* %arg1, i64 %tmp24
- %tmp31 = getelementptr inbounds i32, i32* %tmp30, i64 %tmp29
- %tmp32 = load i32, i32* %tmp31, align 4
+ %tmp30 = getelementptr inbounds i32, ptr %arg1, i64 %tmp24
+ %tmp31 = getelementptr inbounds i32, ptr %tmp30, i64 %tmp29
+ %tmp32 = load i32, ptr %tmp31, align 4
%tmp34 = add nuw nsw i64 %tmp28, 1
%tmp35 = icmp slt i64 %tmp34, %arg
br i1 %tmp35, label %bb27, label %bb39
diff --git a/llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll b/llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll
index dc812ac7f34bb..f26478cb13fa3 100644
--- a/llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trunc-simplify.ll
@@ -27,8 +27,8 @@ define i8 @trunc_of_add(i32 %a) {
; Check that we truncate to zero values assumed to have at least as many
; trailing zeros as the target type.
; CHECK-LABEL: @trunc_to_assumed_zeros
-define i8 @trunc_to_assumed_zeros(i32* %p) {
- %a = load i32, i32* %p
+define i8 @trunc_to_assumed_zeros(ptr %p) {
+ %a = load i32, ptr %p
%and = and i32 %a, 255
%cmp = icmp eq i32 %and, 0
tail call void @llvm.assume(i1 %cmp)
diff --git a/llvm/test/Analysis/ScalarEvolution/truncate.ll b/llvm/test/Analysis/ScalarEvolution/truncate.ll
index 676f3f43bec13..1998b15ef2e6e 100644
--- a/llvm/test/Analysis/ScalarEvolution/truncate.ll
+++ b/llvm/test/Analysis/ScalarEvolution/truncate.ll
@@ -5,7 +5,7 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:1"
target triple = "x86_64-unknown-linux-gnu"
-define void @snork(i8* %arg, i8 %arg1, i64 %arg2) {
+define void @snork(ptr %arg, i8 %arg1, i64 %arg2) {
; CHECK-LABEL: Classifying expressions for: @snork
diff --git a/llvm/test/Analysis/ScalarEvolution/unknown_phis.ll b/llvm/test/Analysis/ScalarEvolution/unknown_phis.ll
index 34e488bb27669..2ab7788e62ccc 100644
--- a/llvm/test/Analysis/ScalarEvolution/unknown_phis.ll
+++ b/llvm/test/Analysis/ScalarEvolution/unknown_phis.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -disable-output "-passes=print<scalar-evolution>" 2>&1 | FileCheck %s
-define void @merge_values_with_ranges(i32 *%a_len_ptr, i32 *%b_len_ptr, i1 %unknown_cond) {
+define void @merge_values_with_ranges(ptr %a_len_ptr, ptr %b_len_ptr, i1 %unknown_cond) {
; CHECK-LABEL: 'merge_values_with_ranges'
; CHECK-NEXT: Classifying expressions for: @merge_values_with_ranges
-; CHECK-NEXT: %len_a = load i32, i32* %a_len_ptr, align 4, !range !0
+; CHECK-NEXT: %len_a = load i32, ptr %a_len_ptr, align 4, !range !0
; CHECK-NEXT: --> %len_a U: [0,2147483647) S: [0,2147483647)
-; CHECK-NEXT: %len_b = load i32, i32* %b_len_ptr, align 4, !range !0
+; CHECK-NEXT: %len_b = load i32, ptr %b_len_ptr, align 4, !range !0
; CHECK-NEXT: --> %len_b U: [0,2147483647) S: [0,2147483647)
; CHECK-NEXT: %len = phi i32 [ %len_a, %if.true ], [ %len_b, %if.false ]
; CHECK-NEXT: --> %len U: [0,2147483647) S: [0,2147483647)
@@ -17,11 +17,11 @@ define void @merge_values_with_ranges(i32 *%a_len_ptr, i32 *%b_len_ptr, i1 %unkn
br i1 %unknown_cond, label %if.true, label %if.false
if.true:
- %len_a = load i32, i32* %a_len_ptr, !range !0
+ %len_a = load i32, ptr %a_len_ptr, !range !0
br label %merge
if.false:
- %len_b = load i32, i32* %b_len_ptr, !range !0
+ %len_b = load i32, ptr %b_len_ptr, !range !0
br label %merge
merge:
@@ -29,14 +29,14 @@ merge:
ret void
}
-define void @merge_values_with_ranges_looped(i32 *%a_len_ptr, i32 *%b_len_ptr) {
+define void @merge_values_with_ranges_looped(ptr %a_len_ptr, ptr %b_len_ptr) {
; TODO: We could be much smarter here. So far we just make sure that we do not
; go into infinite loop analyzing these Phis.
; CHECK-LABEL: 'merge_values_with_ranges_looped'
; CHECK-NEXT: Classifying expressions for: @merge_values_with_ranges_looped
-; CHECK-NEXT: %len_a = load i32, i32* %a_len_ptr, align 4, !range !0
+; CHECK-NEXT: %len_a = load i32, ptr %a_len_ptr, align 4, !range !0
; CHECK-NEXT: --> %len_a U: [0,2147483647) S: [0,2147483647)
-; CHECK-NEXT: %len_b = load i32, i32* %b_len_ptr, align 4, !range !0
+; CHECK-NEXT: %len_b = load i32, ptr %b_len_ptr, align 4, !range !0
; CHECK-NEXT: --> %len_b U: [0,2147483647) S: [0,2147483647)
; CHECK-NEXT: %p1 = phi i32 [ %len_a, %entry ], [ %p2, %loop ]
; CHECK-NEXT: --> %p1 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
@@ -56,8 +56,8 @@ define void @merge_values_with_ranges_looped(i32 *%a_len_ptr, i32 *%b_len_ptr) {
;
entry:
- %len_a = load i32, i32* %a_len_ptr, !range !0
- %len_b = load i32, i32* %b_len_ptr, !range !0
+ %len_a = load i32, ptr %a_len_ptr, !range !0
+ %len_b = load i32, ptr %b_len_ptr, !range !0
br label %loop
loop:
diff --git a/llvm/test/Analysis/ScalarEvolution/values-at-scopes-consistency.ll b/llvm/test/Analysis/ScalarEvolution/values-at-scopes-consistency.ll
index 2aba1c7c64990..c05da88e430f8 100644
--- a/llvm/test/Analysis/ScalarEvolution/values-at-scopes-consistency.ll
+++ b/llvm/test/Analysis/ScalarEvolution/values-at-scopes-consistency.ll
@@ -4,7 +4,7 @@
; This used to register a ValuesAtScopes user, even though nothing was
; added to ValuesAtScope due to a prior invalidation.
-define void @main(i8* %p) {
+define void @main(ptr %p) {
entry:
br label %loop1
@@ -14,8 +14,8 @@ loop1:
loop2:
%i = phi i64 [ 0, %loop1 ], [ %i.next, %loop2.latch ]
%i.next = add nuw nsw i64 %i, 1
- %gep = getelementptr i8, i8* %p, i64 %i
- %val = load i8, i8* %gep
+ %gep = getelementptr i8, ptr %p, i64 %i
+ %val = load i8, ptr %gep
%c = icmp eq i8 %val, 0
br i1 %c, label %loop2.latch, label %exit
diff --git a/llvm/test/Analysis/ScalarEvolution/widenable-condition.ll b/llvm/test/Analysis/ScalarEvolution/widenable-condition.ll
index 133afa4939f36..b7e58bf5067eb 100644
--- a/llvm/test/Analysis/ScalarEvolution/widenable-condition.ll
+++ b/llvm/test/Analysis/ScalarEvolution/widenable-condition.ll
@@ -32,7 +32,7 @@ entry:
loop:
%iv = phi i32 [0, %entry], [%iv.next, %loop]
%iv.next = add i32 %iv, 1
- store i32 %iv, i32 *@G
+ store i32 %iv, ptr @G
%cond_1 = icmp slt i32 %iv.next, 2000
%widenable_cond3 = call i1 @llvm.experimental.widenable.condition()
%exiplicit_guard_cond4 = and i1 %cond_1, %widenable_cond3
diff --git a/llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll b/llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
index 2b12b33158fb0..899d31d266e51 100644
--- a/llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
+++ b/llvm/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
@@ -15,16 +15,16 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK-LABEL: foo
define i32 @foo() {
entry:
- %.pr = load i32, i32* @b, align 4
+ %.pr = load i32, ptr @b, align 4
%cmp10 = icmp slt i32 %.pr, 1
br i1 %cmp10, label %for.cond1.preheader.lr.ph, label %entry.for.end9_crit_edge
entry.for.end9_crit_edge: ; preds = %entry
- %.pre = load i32, i32* @c, align 4
+ %.pre = load i32, ptr @c, align 4
br label %for.end9
for.cond1.preheader.lr.ph: ; preds = %entry
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, ptr @a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.cond1.preheader.for.cond1.preheader.split_crit_edge, label %return.loopexit.split
@@ -41,7 +41,7 @@ if.end: ; preds = %if.end, %for.cond1.
%2 = phi i8 [ 1, %for.cond1.preheader.for.cond1.preheader.split_crit_edge ], [ %dec, %if.end ]
%conv7 = mul i32 %indvars.iv, 258
%shl = and i32 %conv7, 510
- store i32 %shl, i32* @c, align 4
+ store i32 %shl, ptr @c, align 4
; CHECK: %lsr.iv.next = add nsw i32 %lsr.iv, -258
%dec = add i8 %2, -1
@@ -51,24 +51,24 @@ if.end: ; preds = %if.end, %for.cond1.
br i1 %cmp2, label %if.end, label %for.inc8
for.inc8: ; preds = %if.end
- store i32 0, i32* @d, align 4
+ store i32 0, ptr @d, align 4
%inc = add nsw i32 %1, 1
- store i32 %inc, i32* @b, align 4
+ store i32 %inc, ptr @b, align 4
%cmp = icmp slt i32 %1, 0
br i1 %cmp, label %for.cond1.preheader.for.cond1.preheader.split_crit_edge, label %for.cond.for.end9_crit_edge
for.cond.for.end9_crit_edge: ; preds = %for.inc8
- store i8 %dec, i8* @e, align 1
+ store i8 %dec, ptr @e, align 1
br label %for.end9
for.end9: ; preds = %entry.for.end9_crit_edge, %for.cond.for.end9_crit_edge
%3 = phi i32 [ %.pre, %entry.for.end9_crit_edge ], [ %shl, %for.cond.for.end9_crit_edge ]
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %3) #2
+ %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %3) #2
br label %return
return.loopexit.split: ; preds = %for.cond1.preheader.lr.ph
- store i8 1, i8* @e, align 1
- store i32 0, i32* @d, align 4
+ store i8 1, ptr @e, align 1
+ store i32 0, ptr @d, align 4
br label %return
return: ; preds = %return.loopexit.split, %for.end9
@@ -77,5 +77,5 @@ return: ; preds = %return.loopexit.spl
}
; Function Attrs: nounwind optsize
-declare i32 @printf(i8* nocapture readonly, ...)
+declare i32 @printf(ptr nocapture readonly, ...)