[llvm] ba89c66 - [LoopPredication] Convert tests to opaque pointers (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 2 07:52:11 PST 2023
Author: Nikita Popov
Date: 2023-01-02T16:52:03+01:00
New Revision: ba89c66771a8512ddbee7391e8ea11f7149f7ae2
URL: https://github.com/llvm/llvm-project/commit/ba89c66771a8512ddbee7391e8ea11f7149f7ae2
DIFF: https://github.com/llvm/llvm-project/commit/ba89c66771a8512ddbee7391e8ea11f7149f7ae2.diff
LOG: [LoopPredication] Convert tests to opaque pointers (NFC)
Added:
Modified:
llvm/test/Transforms/LoopPredication/assumes.ll
llvm/test/Transforms/LoopPredication/basic.ll
llvm/test/Transforms/LoopPredication/basic_widenable_branch_guards.ll
llvm/test/Transforms/LoopPredication/invariant_load.ll
llvm/test/Transforms/LoopPredication/nested.ll
llvm/test/Transforms/LoopPredication/predicate-exits.ll
llvm/test/Transforms/LoopPredication/preserve-bpi.ll
llvm/test/Transforms/LoopPredication/profitability.ll
llvm/test/Transforms/LoopPredication/reverse.ll
llvm/test/Transforms/LoopPredication/unswitch-exit-loop.ll
llvm/test/Transforms/LoopPredication/visited.ll
llvm/test/Transforms/LoopPredication/widened.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/LoopPredication/assumes.ll b/llvm/test/Transforms/LoopPredication/assumes.ll
index 5b23a1f97038a..9babdb4780405 100644
--- a/llvm/test/Transforms/LoopPredication/assumes.ll
+++ b/llvm/test/Transforms/LoopPredication/assumes.ll
@@ -4,15 +4,15 @@
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2"
target triple = "x86_64-unknown-linux-gnu"
-define i32 @test0(i32* %p1, i8* %p2, i32* %p3, i8* %p4, i8* %p5, i1 %c, i32 %x) {
+define i32 @test0(ptr %p1, ptr %p2, ptr %p3, ptr %p4, ptr %p5, i1 %c, i32 %x) {
; CHECK-LABEL: @test0(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1_1:%.*]] = getelementptr i32, i32* [[P1:%.*]], i64 1
-; CHECK-NEXT: [[P1_2:%.*]] = getelementptr i32, i32* [[P1]], i64 2
-; CHECK-NEXT: [[P1_3:%.*]] = getelementptr i32, i32* [[P1]], i64 3
-; CHECK-NEXT: [[IV_1_START:%.*]] = load i32, i32* [[P1_1]], align 4, !range [[RNG0:![0-9]+]]
-; CHECK-NEXT: [[IV_1_END:%.*]] = load i32, i32* [[P1_2]], align 4, !range [[RNG0]]
-; CHECK-NEXT: [[IV_2_END:%.*]] = load i32, i32* [[P1_3]], align 4, !range [[RNG0]]
+; CHECK-NEXT: [[P1_1:%.*]] = getelementptr i32, ptr [[P1:%.*]], i64 1
+; CHECK-NEXT: [[P1_2:%.*]] = getelementptr i32, ptr [[P1]], i64 2
+; CHECK-NEXT: [[P1_3:%.*]] = getelementptr i32, ptr [[P1]], i64 3
+; CHECK-NEXT: [[IV_1_START:%.*]] = load i32, ptr [[P1_1]], align 4, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[IV_1_END:%.*]] = load i32, ptr [[P1_2]], align 4, !range [[RNG0]]
+; CHECK-NEXT: [[IV_2_END:%.*]] = load i32, ptr [[P1_3]], align 4, !range [[RNG0]]
; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_2_END]], [[IV_1_END]]
; CHECK-NEXT: br i1 [[LOOP_COND]], label [[LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: loop.preheader:
@@ -25,16 +25,16 @@ define i32 @test0(i32* %p1, i8* %p2, i32* %p3, i8* %p4, i8* %p5, i1 %c, i32 %x)
; CHECK: loop:
; CHECK-NEXT: [[IV_1:%.*]] = phi i32 [ [[IV_1_NEXT:%.*]], [[LOOP_NEXT:%.*]] ], [ [[IV_1_START]], [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[IV_2_NEXT:%.*]], [[LOOP_NEXT]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i8, i8* [[P2:%.*]], i32 [[IV_1]]
-; CHECK-NEXT: [[VALUE:%.*]] = load i8, i8* [[GEP_1]], align 1
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i8, ptr [[P2:%.*]], i32 [[IV_1]]
+; CHECK-NEXT: [[VALUE:%.*]] = load i8, ptr [[GEP_1]], align 1
; CHECK-NEXT: [[COND_1:%.*]] = icmp ult i32 [[IV_1]], [[IV_1_END]]
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: [[TMP5:%.*]] = and i1 [[TMP4]], [[WC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[LOOP_NEXT]], label [[DEOPT:%.*]]
; CHECK: loop.next:
; CHECK-NEXT: call void @llvm.assume(i1 [[COND_1]])
-; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr i8, i8* [[P4:%.*]], i32 [[IV_1]]
-; CHECK-NEXT: store i8 [[VALUE]], i8* [[GEP_3]], align 1
+; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr i8, ptr [[P4:%.*]], i32 [[IV_1]]
+; CHECK-NEXT: store i8 [[VALUE]], ptr [[GEP_3]], align 1
; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i32 [[IV_1]], 1
; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i32 [[IV_2]], 1
; CHECK-NEXT: [[LATCH_COND:%.*]] = icmp ult i32 [[IV_2]], [[IV_2_END]]
@@ -47,20 +47,20 @@ define i32 @test0(i32* %p1, i8* %p2, i32* %p3, i8* %p4, i8* %p5, i1 %c, i32 %x)
; CHECK-NEXT: ret i32 [[RES]]
;
entry:
- %p1.1 = getelementptr i32, i32* %p1, i64 1
- %p1.2 = getelementptr i32, i32* %p1, i64 2
- %p1.3 = getelementptr i32, i32* %p1, i64 3
- %iv.1.start = load i32, i32* %p1.1, !range !0
- %iv.1.end = load i32, i32* %p1.2, !range !0
- %iv.2.end = load i32, i32* %p1.3, !range !0
+ %p1.1 = getelementptr i32, ptr %p1, i64 1
+ %p1.2 = getelementptr i32, ptr %p1, i64 2
+ %p1.3 = getelementptr i32, ptr %p1, i64 3
+ %iv.1.start = load i32, ptr %p1.1, !range !0
+ %iv.1.end = load i32, ptr %p1.2, !range !0
+ %iv.2.end = load i32, ptr %p1.3, !range !0
%loop.cond = icmp ult i32 %iv.2.end, %iv.1.end
br i1 %loop.cond, label %loop, label %exit
loop:
%iv.1 = phi i32 [ %iv.1.start, %entry ], [ %iv.1.next, %latch ]
%iv.2 = phi i32 [ 0, %entry ], [ %iv.2.next, %latch ]
- %gep.1 = getelementptr i8, i8* %p2, i32 %iv.1
- %value = load i8, i8* %gep.1
+ %gep.1 = getelementptr i8, ptr %p2, i32 %iv.1
+ %value = load i8, ptr %gep.1
%cond.1 = icmp ult i32 %iv.1, %iv.1.end
%wc = call i1 @llvm.experimental.widenable.condition()
%explicit_guard_cond = and i1 %cond.1, %wc
@@ -70,13 +70,13 @@ loop.next:
br i1 %cond.1, label %if.true, label %if.false
if.true:
- %gep.3 = getelementptr i8, i8* %p4, i32 %iv.1
- store i8 %value, i8* %gep.3
+ %gep.3 = getelementptr i8, ptr %p4, i32 %iv.1
+ store i8 %value, ptr %gep.3
br label %latch
if.false:
- %gep.4 = getelementptr i8, i8* %p4, i32 %iv.2
- store i8 %value, i8* %gep.4
+ %gep.4 = getelementptr i8, ptr %p4, i32 %iv.2
+ store i8 %value, ptr %gep.4
br label %latch
latch:
diff --git a/llvm/test/Transforms/LoopPredication/basic.ll b/llvm/test/Transforms/LoopPredication/basic.ll
index 12a90da41c86a..c930a41ae1d3c 100644
--- a/llvm/test/Transforms/LoopPredication/basic.ll
+++ b/llvm/test/Transforms/LoopPredication/basic.ll
@@ -4,7 +4,7 @@
declare void @llvm.experimental.guard(i1, ...)
-define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -21,8 +21,8 @@ define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -48,8 +48,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -61,7 +61,7 @@ exit:
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ule_latch_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -78,8 +78,8 @@ define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(i32* %array, i32 %length, i
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ule i32 [[I_NEXT]], [[N]]
@@ -105,8 +105,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -118,7 +118,7 @@ exit:
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_ugt_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ugt_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ugt_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -135,8 +135,8 @@ define i32 @unsigned_loop_0_to_n_ugt_check(i32* %array, i32 %length, i32 %n) {
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -162,8 +162,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -175,7 +175,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -192,8 +192,8 @@ define i32 @signed_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -219,8 +219,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -232,11 +232,11 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_ult_check_length_range_known(i32* %array, i32* %length.ptr, i32 %n) {
+define i32 @signed_loop_0_to_n_ult_check_length_range_known(ptr %array, ptr %length.ptr, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_ult_check_length_range_known(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
-; CHECK-NEXT: [[LENGTH:%.*]] = load i32, i32* [[LENGTH_PTR:%.*]], align 4, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[LENGTH:%.*]] = load i32, ptr [[LENGTH_PTR:%.*]], align 4, !range [[RNG0:![0-9]+]]
; CHECK-NEXT: br i1 [[TMP5]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
; CHECK: loop.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = icmp sle i32 [[N]], [[LENGTH]]
@@ -249,8 +249,8 @@ define i32 @signed_loop_0_to_n_ult_check_length_range_known(i32* %array, i32* %l
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP1]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -264,7 +264,7 @@ define i32 @signed_loop_0_to_n_ult_check_length_range_known(i32* %array, i32* %l
;
entry:
%tmp5 = icmp sle i32 %n, 0
- %length = load i32, i32* %length.ptr, !range !{i32 1, i32 2147483648}
+ %length = load i32, ptr %length.ptr, !range !{i32 1, i32 2147483648}
br i1 %tmp5, label %exit, label %loop.preheader
loop.preheader:
@@ -277,8 +277,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -290,7 +290,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_inverse_latch_predicate(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_inverse_latch_predicate(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_inverse_latch_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -307,8 +307,8 @@ define i32 @signed_loop_0_to_n_inverse_latch_predicate(i32* %array, i32 %length,
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sgt i32 [[I_NEXT]], [[N]]
@@ -334,8 +334,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -347,7 +347,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_sle_latch_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_sle_latch_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_sle_latch_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -364,8 +364,8 @@ define i32 @signed_loop_0_to_n_sle_latch_ult_check(i32* %array, i32 %length, i32
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sle i32 [[I_NEXT]], [[N]]
@@ -391,8 +391,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -404,7 +404,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_preincrement_latch_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_preincrement_latch_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_preincrement_latch_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -422,8 +422,8 @@ define i32 @signed_loop_0_to_n_preincrement_latch_check(i32* %array, i32 %length
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I]], [[N]]
@@ -449,8 +449,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
@@ -462,7 +462,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -481,8 +481,8 @@ define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_chec
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
@@ -509,8 +509,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp slt i32 %i, %n
@@ -521,7 +521,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_sle_latch_offset_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -540,8 +540,8 @@ define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(i32* %array, i32 %leng
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sle i32 [[I_NEXT]], [[N]]
@@ -568,8 +568,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
@@ -581,7 +581,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -599,8 +599,8 @@ define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(i32* %array, i3
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[I_NEXT_OFFSET:%.*]] = add i32 [[I_NEXT]], 1
@@ -628,8 +628,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
@@ -642,7 +642,7 @@ exit:
ret i32 %result
}
-define i32 @unsupported_latch_pred_loop_0_to_n(i32* %array, i32 %length, i32 %n) {
+define i32 @unsupported_latch_pred_loop_0_to_n(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsupported_latch_pred_loop_0_to_n(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -655,8 +655,8 @@ define i32 @unsupported_latch_pred_loop_0_to_n(i32* %array, i32 %length, i32 %n)
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ne i32 [[I_NEXT]], [[N]]
@@ -682,8 +682,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
@@ -695,7 +695,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_unsupported_iv_step(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_unsupported_iv_step(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_unsupported_iv_step(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -708,8 +708,8 @@ define i32 @signed_loop_0_to_n_unsupported_iv_step(i32* %array, i32 %length, i32
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 2
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -735,8 +735,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 2
@@ -748,7 +748,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_equal_iv_range_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_equal_iv_range_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_equal_iv_range_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -766,8 +766,8 @@ define i32 @signed_loop_0_to_n_equal_iv_range_check(i32* %array, i32 %length, i3
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
@@ -796,8 +796,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add nsw i32 %j, 1
@@ -810,7 +810,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_start_to_n_offset_iv_range_check(i32* %array, i32 %start.i,
+define i32 @signed_loop_start_to_n_offset_iv_range_check(ptr %array, i32 %start.i,
; CHECK-LABEL: @signed_loop_start_to_n_offset_iv_range_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -830,8 +830,8 @@ define i32 @signed_loop_start_to_n_offset_iv_range_check(i32* %array, i32 %start
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP4]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add i32 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
@@ -862,8 +862,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add i32 %j, 1
@@ -876,7 +876,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_different_iv_types(i32* %array, i16 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_different_iv_types(ptr %array, i16 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_different_iv_types(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -890,8 +890,8 @@ define i32 @signed_loop_0_to_n_different_iv_types(i32* %array, i16 %length, i32
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i16 [[J]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add i16 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
@@ -920,8 +920,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add i16 %j, 1
@@ -934,7 +934,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_different_iv_strides(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_different_iv_strides(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_different_iv_strides(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -948,8 +948,8 @@ define i32 @signed_loop_0_to_n_different_iv_strides(i32* %array, i32 %length, i3
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[J]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 2
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
@@ -978,8 +978,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add nsw i32 %j, 2
@@ -992,7 +992,7 @@ exit:
ret i32 %result
}
-define i32 @two_range_checks(i32* %array.1, i32 %length.1,
+define i32 @two_range_checks(ptr %array.1, i32 %length.1,
; CHECK-LABEL: @two_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1015,11 +1015,11 @@ define i32 @two_range_checks(i32* %array.1, i32 %length.1,
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP6]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1031,7 +1031,7 @@ define i32 @two_range_checks(i32* %array.1, i32 %length.1,
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
- i32* %array.2, i32 %length.2, i32 %n) {
+ ptr %array.2, i32 %length.2, i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
br i1 %tmp5, label %exit, label %loop.preheader
@@ -1048,12 +1048,12 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.1, %array.2.i
%i.next = add nuw i32 %i, 1
@@ -1065,7 +1065,7 @@ exit:
ret i32 %result
}
-define i32 @three_range_checks(i32* %array.1, i32 %length.1,
+define i32 @three_range_checks(ptr %array.1, i32 %length.1,
; CHECK-LABEL: @three_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1094,14 +1094,14 @@ define i32 @three_range_checks(i32* %array.1, i32 %length.1,
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP10]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_2:%.*]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
-; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_3:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, i32* [[ARRAY_3_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_3:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, ptr [[ARRAY_3_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_2]], [[ARRAY_3_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1113,8 +1113,8 @@ define i32 @three_range_checks(i32* %array.1, i32 %length.1,
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
- i32* %array.2, i32 %length.2,
- i32* %array.3, i32 %length.3, i32 %n) {
+ ptr %array.2, i32 %length.2,
+ ptr %array.3, i32 %length.3, i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
br i1 %tmp5, label %exit, label %loop.preheader
@@ -1133,16 +1133,16 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.2 = add i32 %loop.acc.1, %array.2.i
- %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
- %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %array.3.i.ptr = getelementptr inbounds i32, ptr %array.3, i64 %i.i64
+ %array.3.i = load i32, ptr %array.3.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.2, %array.3.i
%i.next = add nuw i32 %i, 1
@@ -1154,7 +1154,7 @@ exit:
ret i32 %result
}
-define i32 @three_guards(i32* %array.1, i32 %length.1,
+define i32 @three_guards(ptr %array.1, i32 %length.1,
; CHECK-LABEL: @three_guards(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1177,20 +1177,20 @@ define i32 @three_guards(i32* %array.1, i32 %length.1,
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_1]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
; CHECK-NEXT: [[WITHIN_BOUNDS_2:%.*]] = icmp ult i32 [[I]], [[LENGTH_2]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP5]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_2]])
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_2:%.*]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
; CHECK-NEXT: [[WITHIN_BOUNDS_3:%.*]] = icmp ult i32 [[I]], [[LENGTH_3]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP8]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_3]])
-; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_3:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, i32* [[ARRAY_3_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_3:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, ptr [[ARRAY_3_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_2]], [[ARRAY_3_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1202,8 +1202,8 @@ define i32 @three_guards(i32* %array.1, i32 %length.1,
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
- i32* %array.2, i32 %length.2,
- i32* %array.3, i32 %length.3, i32 %n) {
+ ptr %array.2, i32 %length.2,
+ ptr %array.3, i32 %length.3, i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
br i1 %tmp5, label %exit, label %loop.preheader
@@ -1220,22 +1220,22 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.1, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
%within.bounds.2 = icmp ult i32 %i, %length.2
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.2, i32 9) [ "deopt"() ]
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.2 = add i32 %loop.acc.1, %array.2.i
%within.bounds.3 = icmp ult i32 %i, %length.3
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds.3, i32 9) [ "deopt"() ]
- %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
- %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %array.3.i.ptr = getelementptr inbounds i32, ptr %array.3, i64 %i.i64
+ %array.3.i = load i32, ptr %array.3.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.2, %array.3.i
%i.next = add nuw i32 %i, 1
@@ -1247,7 +1247,7 @@ exit:
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_unrelated_condition(i32* %array, i32 %length, i32 %n, i32 %x) {
+define i32 @unsigned_loop_0_to_n_unrelated_condition(ptr %array, i32 %length, i32 %n, i32 %x) {
; CHECK-LABEL: @unsigned_loop_0_to_n_unrelated_condition(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1267,8 +1267,8 @@ define i32 @unsigned_loop_0_to_n_unrelated_condition(i32* %array, i32 %length, i
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[GUARD_COND]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1296,8 +1296,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -1310,7 +1310,7 @@ exit:
}
; Don't change the guard condition if there were no widened subconditions
-define i32 @test_no_widened_conditions(i32* %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
+define i32 @test_no_widened_conditions(ptr %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: @test_no_widened_conditions(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1327,8 +1327,8 @@ define i32 @test_no_widened_conditions(i32* %array, i32 %length, i32 %n, i32 %x1
; CHECK-NEXT: [[GUARD_COND:%.*]] = and i1 [[UNRELATED_COND_AND_1]], [[UNRELATED_COND_3]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[GUARD_COND]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1359,8 +1359,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -1372,7 +1372,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_start_to_n_loop_variant_bound(i32* %array, i32 %x, i32 %start, i32 %n) {
+define i32 @signed_loop_start_to_n_loop_variant_bound(ptr %array, i32 %x, i32 %start, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_loop_variant_bound(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1386,8 +1386,8 @@ define i32 @signed_loop_start_to_n_loop_variant_bound(i32* %array, i32 %x, i32 %
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[BOUND]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -1414,8 +1414,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
@@ -1427,7 +1427,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_start_to_n_non_monotonic_predicate(i32* %array, i32 %x, i32 %start, i32 %n) {
+define i32 @signed_loop_start_to_n_non_monotonic_predicate(ptr %array, i32 %x, i32 %start, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_non_monotonic_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1440,8 +1440,8 @@ define i32 @signed_loop_start_to_n_non_monotonic_predicate(i32* %array, i32 %x,
; CHECK-NEXT: [[GUARD_COND:%.*]] = icmp eq i32 [[I]], [[X:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[GUARD_COND]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -1467,8 +1467,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
@@ -1480,7 +1480,7 @@ exit:
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_hoist_length(i32* %array, i16 %length.i16, i32 %n) {
+define i32 @unsigned_loop_0_to_n_hoist_length(ptr %array, i16 %length.i16, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_hoist_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1499,8 +1499,8 @@ define i32 @unsigned_loop_0_to_n_hoist_length(i32* %array, i16 %length.i16, i32
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1527,8 +1527,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -1540,7 +1540,7 @@ exit:
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_cant_hoist_length(i32* %array, i32 %length, i32 %divider, i32 %n) {
+define i32 @unsigned_loop_0_to_n_cant_hoist_length(ptr %array, i32 %length, i32 %divider, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_cant_hoist_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1558,8 +1558,8 @@ define i32 @unsigned_loop_0_to_n_cant_hoist_length(i32* %array, i32 %length, i32
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1586,8 +1586,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -1602,10 +1602,10 @@ exit:
; This is a case where the length information tells us that the guard
; must trigger on some iteration.
-define i32 @provably_taken(i32* %array, i32* %length.ptr) {
+define i32 @provably_taken(ptr %array, ptr %length.ptr) {
; CHECK-LABEL: @provably_taken(
; CHECK-NEXT: loop.preheader:
-; CHECK-NEXT: [[LENGTH:%.*]] = load i32, i32* [[LENGTH_PTR:%.*]], align 4, !range [[RNG1:![0-9]+]]
+; CHECK-NEXT: [[LENGTH:%.*]] = load i32, ptr [[LENGTH_PTR:%.*]], align 4, !range [[RNG1:![0-9]+]]
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 0, [[LENGTH]]
; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], false
; CHECK-NEXT: br label [[LOOP:%.*]]
@@ -1616,8 +1616,8 @@ define i32 @provably_taken(i32* %array, i32* %length.ptr) {
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP1]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], 200
@@ -1627,7 +1627,7 @@ define i32 @provably_taken(i32* %array, i32* %length.ptr) {
; CHECK-NEXT: ret i32 [[RESULT]]
;
loop.preheader:
- %length = load i32, i32* %length.ptr, !range !{i32 0, i32 50}
+ %length = load i32, ptr %length.ptr, !range !{i32 0, i32 50}
br label %loop
loop:
@@ -1637,8 +1637,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -1652,7 +1652,7 @@ exit:
; NE Check (as produced by LFTR) where we can prove Start < End via simple
; instruction analysis
-define i32 @ne_latch_zext(i32* %array, i32 %length, i16 %n16) {
+define i32 @ne_latch_zext(ptr %array, i32 %length, i16 %n16) {
; CHECK-LABEL: @ne_latch_zext(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: [[N:%.*]] = zext i16 [[N16:%.*]] to i32
@@ -1691,7 +1691,7 @@ exit:
}
; Same as previous, but with a pre-increment test since this is easier to match
-define i32 @ne_latch_zext_preinc(i32* %array, i32 %length, i16 %n16) {
+define i32 @ne_latch_zext_preinc(ptr %array, i32 %length, i16 %n16) {
; CHECK-LABEL: @ne_latch_zext_preinc(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: [[N:%.*]] = zext i16 [[N16:%.*]] to i32
@@ -1730,7 +1730,7 @@ exit:
; NE Check (as produced by LFTR) where we can prove Start < End via the
; condition guarding the loop entry.
-define i32 @ne_latch_dom_check(i32* %array, i32 %length, i32 %n) {
+define i32 @ne_latch_dom_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @ne_latch_dom_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1772,7 +1772,7 @@ exit:
}
; Same as previous, but easier to match
-define i32 @ne_latch_dom_check_preinc(i32* %array, i32 %length, i32 %n) {
+define i32 @ne_latch_dom_check_preinc(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @ne_latch_dom_check_preinc(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1817,7 +1817,7 @@ exit:
}
; Same as previous, except swapped br/cmp
-define i32 @eq_latch_dom_check_preinc(i32* %array, i32 %length, i32 %n) {
+define i32 @eq_latch_dom_check_preinc(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @eq_latch_dom_check_preinc(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1864,7 +1864,7 @@ exit:
; NE latch - can't prove (end-start) mod step == 0 (i.e. might wrap
; around several times or even be infinite)
-define i32 @neg_ne_latch_mod_step(i32* %array, i32 %length, i16 %n16) {
+define i32 @neg_ne_latch_mod_step(ptr %array, i32 %length, i16 %n16) {
; CHECK-LABEL: @neg_ne_latch_mod_step(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: [[N:%.*]] = zext i16 [[N16:%.*]] to i32
@@ -1897,7 +1897,7 @@ exit:
}
; NE latch - TODO: could prove (end-start) mod step == 0
-define i32 @ne_latch_mod_step(i32* %array, i32 %length) {
+define i32 @ne_latch_mod_step(ptr %array, i32 %length) {
; CHECK-LABEL: @ne_latch_mod_step(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
@@ -1928,7 +1928,7 @@ exit:
}
; NE Latch - but end > start so wraps around and not equivalent to a ult
-define i32 @neg_ne_latch_swapped_order(i32* %array, i32 %length) {
+define i32 @neg_ne_latch_swapped_order(ptr %array, i32 %length) {
; CHECK-LABEL: @neg_ne_latch_swapped_order(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
@@ -1961,7 +1961,7 @@ exit:
; Negative test, make sure we don't crash on unconditional latches
; TODO: there's no reason we shouldn't be able to predicate the
; condition for a statically infinite loop.
-define i32 @unconditional_latch(i32* %a, i32 %length) {
+define i32 @unconditional_latch(ptr %a, i32 %length) {
; CHECK-LABEL: @unconditional_latch(
; CHECK-NEXT: loop.preheader:
; CHECK-NEXT: br label [[LOOP:%.*]]
@@ -1969,7 +1969,7 @@ define i32 @unconditional_latch(i32* %a, i32 %length) {
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 400, [[LOOP_PREHEADER:%.*]] ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
-; CHECK-NEXT: store volatile i32 0, i32* [[A:%.*]], align 4
+; CHECK-NEXT: store volatile i32 0, ptr [[A:%.*]], align 4
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: br label [[LOOP]]
;
@@ -1980,7 +1980,7 @@ loop:
%i = phi i32 [ %i.next, %loop ], [ 400, %loop.preheader ]
%within.bounds = icmp ult i32 %i, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
- store volatile i32 0, i32* %a
+ store volatile i32 0, ptr %a
%i.next = add i32 %i, 1
br label %loop
}
diff --git a/llvm/test/Transforms/LoopPredication/basic_widenable_branch_guards.ll b/llvm/test/Transforms/LoopPredication/basic_widenable_branch_guards.ll
index a3e10f81d7b64..0e5d3e78ead31 100644
--- a/llvm/test/Transforms/LoopPredication/basic_widenable_branch_guards.ll
+++ b/llvm/test/Transforms/LoopPredication/basic_widenable_branch_guards.ll
@@ -5,7 +5,7 @@
declare void @llvm.experimental.guard(i1, ...)
-define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -28,8 +28,8 @@ define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -62,8 +62,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -74,7 +74,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ule_latch_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -97,8 +97,8 @@ define i32 @unsigned_loop_0_to_n_ule_latch_ult_check(i32* %array, i32 %length, i
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ule i32 [[I_NEXT]], [[N]]
@@ -131,8 +131,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ule i32 %i.next, %n
@@ -143,7 +143,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_ugt_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ugt_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_ugt_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -166,8 +166,8 @@ define i32 @unsigned_loop_0_to_n_ugt_check(i32* %array, i32 %length, i32 %n) {
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -200,8 +200,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -212,7 +212,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -235,8 +235,8 @@ define i32 @signed_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -270,8 +270,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp slt i32 %i.next, %n
@@ -282,11 +282,11 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_ult_check_length_range_known(i32* %array, i32* %length.ptr, i32 %n) {
+define i32 @signed_loop_0_to_n_ult_check_length_range_known(ptr %array, ptr %length.ptr, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_ult_check_length_range_known(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
-; CHECK-NEXT: [[LENGTH:%.*]] = load i32, i32* [[LENGTH_PTR:%.*]], align 4, !range [[RNG2:![0-9]+]]
+; CHECK-NEXT: [[LENGTH:%.*]] = load i32, ptr [[LENGTH_PTR:%.*]], align 4, !range [[RNG2:![0-9]+]]
; CHECK-NEXT: br i1 [[TMP5]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
; CHECK: loop.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = icmp sle i32 [[N]], [[LENGTH]]
@@ -305,8 +305,8 @@ define i32 @signed_loop_0_to_n_ult_check_length_range_known(i32* %array, i32* %l
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -320,7 +320,7 @@ define i32 @signed_loop_0_to_n_ult_check_length_range_known(i32* %array, i32* %l
;
entry:
%tmp5 = icmp sle i32 %n, 0
- %length = load i32, i32* %length.ptr, !range !1
+ %length = load i32, ptr %length.ptr, !range !1
br i1 %tmp5, label %exit, label %loop.preheader
loop.preheader: ; preds = %entry
@@ -340,8 +340,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp slt i32 %i.next, %n
@@ -352,7 +352,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_inverse_latch_predicate(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_inverse_latch_predicate(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_inverse_latch_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -375,8 +375,8 @@ define i32 @signed_loop_0_to_n_inverse_latch_predicate(i32* %array, i32 %length,
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sgt i32 [[I_NEXT]], [[N]]
@@ -409,8 +409,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp sgt i32 %i.next, %n
@@ -421,7 +421,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_sle_latch_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_sle_latch_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_sle_latch_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -444,8 +444,8 @@ define i32 @signed_loop_0_to_n_sle_latch_ult_check(i32* %array, i32 %length, i32
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sle i32 [[I_NEXT]], [[N]]
@@ -478,8 +478,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp sle i32 %i.next, %n
@@ -490,7 +490,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_preincrement_latch_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_preincrement_latch_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_preincrement_latch_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -514,8 +514,8 @@ define i32 @signed_loop_0_to_n_preincrement_latch_check(i32* %array, i32 %length
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I]], [[N]]
@@ -548,8 +548,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
%continue = icmp slt i32 %i, %n
@@ -560,7 +560,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -585,8 +585,8 @@ define i32 @signed_loop_0_to_n_preincrement_latch_check_postincrement_guard_chec
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]], !prof [[PROF1]]
@@ -619,8 +619,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp slt i32 %i, %n
br i1 %continue, label %loop, label %exit, !prof !2
@@ -630,7 +630,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_sle_latch_offset_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -655,8 +655,8 @@ define i32 @signed_loop_0_to_n_sle_latch_offset_ult_check(i32* %array, i32 %leng
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sle i32 [[I_NEXT]], [[N]]
@@ -690,8 +690,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
%continue = icmp sle i32 %i.next, %n
@@ -702,7 +702,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -726,8 +726,8 @@ define i32 @signed_loop_0_to_n_offset_sle_latch_offset_ult_check(i32* %array, i3
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[I_NEXT_OFFSET:%.*]] = add i32 [[I_NEXT]], 1
@@ -762,8 +762,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add i32 %i, 1
%i.next.offset = add i32 %i.next, 1
@@ -775,7 +775,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @unsupported_latch_pred_loop_0_to_n(i32* %array, i32 %length, i32 %n) {
+define i32 @unsupported_latch_pred_loop_0_to_n(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @unsupported_latch_pred_loop_0_to_n(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -794,8 +794,8 @@ define i32 @unsupported_latch_pred_loop_0_to_n(i32* %array, i32 %length, i32 %n)
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ne i32 [[I_NEXT]], [[N]]
@@ -828,8 +828,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
%continue = icmp ne i32 %i.next, %n
@@ -840,7 +840,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_unsupported_iv_step(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_unsupported_iv_step(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_unsupported_iv_step(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -859,8 +859,8 @@ define i32 @signed_loop_0_to_n_unsupported_iv_step(i32* %array, i32 %length, i32
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 2
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -893,8 +893,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 2
%continue = icmp slt i32 %i.next, %n
@@ -905,7 +905,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_equal_iv_range_check(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_equal_iv_range_check(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_equal_iv_range_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -929,8 +929,8 @@ define i32 @signed_loop_0_to_n_equal_iv_range_check(i32* %array, i32 %length, i3
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
@@ -965,8 +965,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add nsw i32 %j, 1
%i.next = add nsw i32 %i, 1
@@ -978,7 +978,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_start_to_n_offset_iv_range_check(i32* %array, i32 %start.i, i32 %start.j, i32 %length, i32 %n) {
+define i32 @signed_loop_start_to_n_offset_iv_range_check(ptr %array, i32 %start.i, i32 %start.j, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_offset_iv_range_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1004,8 +1004,8 @@ define i32 @signed_loop_start_to_n_offset_iv_range_check(i32* %array, i32 %start
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add i32 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
@@ -1040,8 +1040,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add i32 %j, 1
%i.next = add i32 %i, 1
@@ -1053,7 +1053,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_different_iv_types(i32* %array, i16 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_different_iv_types(ptr %array, i16 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_different_iv_types(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1073,8 +1073,8 @@ define i32 @signed_loop_0_to_n_different_iv_types(i32* %array, i16 %length, i32
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add i16 [[J]], 1
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
@@ -1109,8 +1109,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add i16 %j, 1
%i.next = add i32 %i, 1
@@ -1122,7 +1122,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_0_to_n_different_iv_strides(i32* %array, i32 %length, i32 %n) {
+define i32 @signed_loop_0_to_n_different_iv_strides(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @signed_loop_0_to_n_different_iv_strides(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1142,8 +1142,8 @@ define i32 @signed_loop_0_to_n_different_iv_strides(i32* %array, i32 %length, i3
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 2
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
@@ -1178,8 +1178,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%j.next = add nsw i32 %j, 2
%i.next = add nsw i32 %i, 1
@@ -1191,7 +1191,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @two_range_checks(i32* %array.1, i32 %length.1, i32* %array.2, i32 %length.2, i32 %n) {
+define i32 @two_range_checks(ptr %array.1, i32 %length.1, ptr %array.2, i32 %length.2, i32 %n) {
; CHECK-LABEL: @two_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1220,11 +1220,11 @@ define i32 @two_range_checks(i32* %array.1, i32 %length.1, i32* %array.2, i32 %l
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1259,11 +1259,11 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.1, %array.2.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -1274,7 +1274,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @three_range_checks(i32* %array.1, i32 %length.1, i32* %array.2, i32 %length.2, i32* %array.3, i32 %length.3, i32 %n) {
+define i32 @three_range_checks(ptr %array.1, i32 %length.1, ptr %array.2, i32 %length.2, ptr %array.3, i32 %length.3, i32 %n) {
; CHECK-LABEL: @three_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1309,14 +1309,14 @@ define i32 @three_range_checks(i32* %array.1, i32 %length.1, i32* %array.2, i32
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_2:%.*]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
-; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_3:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, i32* [[ARRAY_3_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_3:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, ptr [[ARRAY_3_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_2]], [[ARRAY_3_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1353,14 +1353,14 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.2 = add i32 %loop.acc.1, %array.2.i
- %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
- %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %array.3.i.ptr = getelementptr inbounds i32, ptr %array.3, i64 %i.i64
+ %array.3.i = load i32, ptr %array.3.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.2, %array.3.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -1371,7 +1371,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @three_guards(i32* %array.1, i32 %length.1, i32* %array.2, i32 %length.2, i32* %array.3, i32 %length.3, i32 %n) {
+define i32 @three_guards(ptr %array.1, i32 %length.1, ptr %array.2, i32 %length.2, ptr %array.3, i32 %length.3, i32 %n) {
; CHECK-LABEL: @three_guards(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1400,8 +1400,8 @@ define i32 @three_guards(i32* %array.1, i32 %length.1, i32* %array.2, i32 %lengt
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_1]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_1:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, i32* [[ARRAY_1_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_1_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_1:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_1_I:%.*]] = load i32, ptr [[ARRAY_1_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_1:%.*]] = add i32 [[LOOP_ACC]], [[ARRAY_1_I]]
; CHECK-NEXT: [[WITHIN_BOUNDS_2:%.*]] = icmp ult i32 [[I]], [[LENGTH_2]]
; CHECK-NEXT: [[WIDENABLE_COND4:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -1412,8 +1412,8 @@ define i32 @three_guards(i32* %array.1, i32 %length.1, i32* %array.2, i32 %lengt
; CHECK-NEXT: ret i32 [[DEOPTCALL3]]
; CHECK: guarded1:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_2]])
-; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_2:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, i32* [[ARRAY_2_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_2_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_2:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_2_I:%.*]] = load i32, ptr [[ARRAY_2_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_2:%.*]] = add i32 [[LOOP_ACC_1]], [[ARRAY_2_I]]
; CHECK-NEXT: [[WITHIN_BOUNDS_3:%.*]] = icmp ult i32 [[I]], [[LENGTH_3]]
; CHECK-NEXT: [[WIDENABLE_COND9:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -1424,8 +1424,8 @@ define i32 @three_guards(i32* %array.1, i32 %length.1, i32* %array.2, i32 %lengt
; CHECK-NEXT: ret i32 [[DEOPTCALL8]]
; CHECK: guarded6:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS_3]])
-; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY_3:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, i32* [[ARRAY_3_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_3_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY_3:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_3_I:%.*]] = load i32, ptr [[ARRAY_3_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC_2]], [[ARRAY_3_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1458,8 +1458,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.1.i.ptr = getelementptr inbounds i32, i32* %array.1, i64 %i.i64
- %array.1.i = load i32, i32* %array.1.i.ptr, align 4
+ %array.1.i.ptr = getelementptr inbounds i32, ptr %array.1, i64 %i.i64
+ %array.1.i = load i32, ptr %array.1.i.ptr, align 4
%loop.acc.1 = add i32 %loop.acc, %array.1.i
%within.bounds.2 = icmp ult i32 %i, %length.2
%widenable_cond4 = call i1 @llvm.experimental.widenable.condition()
@@ -1471,8 +1471,8 @@ deopt2: ; preds = %guarded
ret i32 %deoptcall3
guarded1: ; preds = %guarded
- %array.2.i.ptr = getelementptr inbounds i32, i32* %array.2, i64 %i.i64
- %array.2.i = load i32, i32* %array.2.i.ptr, align 4
+ %array.2.i.ptr = getelementptr inbounds i32, ptr %array.2, i64 %i.i64
+ %array.2.i = load i32, ptr %array.2.i.ptr, align 4
%loop.acc.2 = add i32 %loop.acc.1, %array.2.i
%within.bounds.3 = icmp ult i32 %i, %length.3
%widenable_cond9 = call i1 @llvm.experimental.widenable.condition()
@@ -1484,8 +1484,8 @@ deopt7: ; preds = %guarded1
ret i32 %deoptcall8
guarded6: ; preds = %guarded1
- %array.3.i.ptr = getelementptr inbounds i32, i32* %array.3, i64 %i.i64
- %array.3.i = load i32, i32* %array.3.i.ptr, align 4
+ %array.3.i.ptr = getelementptr inbounds i32, ptr %array.3, i64 %i.i64
+ %array.3.i = load i32, ptr %array.3.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc.2, %array.3.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -1496,7 +1496,7 @@ exit: ; preds = %guarded6, %entry
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_unrelated_condition(i32* %array, i32 %length, i32 %n, i32 %x) {
+define i32 @unsigned_loop_0_to_n_unrelated_condition(ptr %array, i32 %length, i32 %n, i32 %x) {
; CHECK-LABEL: @unsigned_loop_0_to_n_unrelated_condition(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1522,8 +1522,8 @@ define i32 @unsigned_loop_0_to_n_unrelated_condition(i32* %array, i32 %length, i
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[GUARD_COND]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1558,8 +1558,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -1570,7 +1570,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @test_no_widened_conditions(i32* %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
+define i32 @test_no_widened_conditions(ptr %array, i32 %length, i32 %n, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: @test_no_widened_conditions(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1593,8 +1593,8 @@ define i32 @test_no_widened_conditions(i32* %array, i32 %length, i32 %n, i32 %x1
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1631,8 +1631,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -1643,7 +1643,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_start_to_n_loop_variant_bound(i32* %array, i32 %x, i32 %start, i32 %n) {
+define i32 @signed_loop_start_to_n_loop_variant_bound(ptr %array, i32 %x, i32 %start, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_loop_variant_bound(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1663,8 +1663,8 @@ define i32 @signed_loop_start_to_n_loop_variant_bound(i32* %array, i32 %x, i32 %
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -1698,8 +1698,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
%continue = icmp slt i32 %i.next, %n
@@ -1710,7 +1710,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @signed_loop_start_to_n_non_monotonic_predicate(i32* %array, i32 %x, i32 %start, i32 %n) {
+define i32 @signed_loop_start_to_n_non_monotonic_predicate(ptr %array, i32 %x, i32 %start, i32 %n) {
; CHECK-LABEL: @signed_loop_start_to_n_non_monotonic_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -1729,8 +1729,8 @@ define i32 @signed_loop_start_to_n_non_monotonic_predicate(i32* %array, i32 %x,
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp slt i32 [[I_NEXT]], [[N]]
@@ -1763,8 +1763,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nsw i32 %i, 1
%continue = icmp slt i32 %i.next, %n
@@ -1775,7 +1775,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_hoist_length(i32* %array, i16 %length.i16, i32 %n) {
+define i32 @unsigned_loop_0_to_n_hoist_length(ptr %array, i16 %length.i16, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_hoist_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1800,8 +1800,8 @@ define i32 @unsigned_loop_0_to_n_hoist_length(i32* %array, i16 %length.i16, i32
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1835,8 +1835,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -1847,7 +1847,7 @@ exit: ; preds = %guarded, %entry
ret i32 %result
}
-define i32 @unsigned_loop_0_to_n_cant_hoist_length(i32* %array, i32 %length, i32 %divider, i32 %n) {
+define i32 @unsigned_loop_0_to_n_cant_hoist_length(ptr %array, i32 %length, i32 %divider, i32 %n) {
; CHECK-LABEL: @unsigned_loop_0_to_n_cant_hoist_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1871,8 +1871,8 @@ define i32 @unsigned_loop_0_to_n_cant_hoist_length(i32* %array, i32 %length, i32
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1906,8 +1906,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -1920,7 +1920,7 @@ exit: ; preds = %guarded, %entry
; Make sure that if we're going to consider a branch widenable, that the
; call to widenable condition is actually present.
-define i32 @negative_WC_required(i32* %array, i32 %length, i32 %n, i1 %unrelated) {
+define i32 @negative_WC_required(ptr %array, i32 %length, i32 %n, i1 %unrelated) {
; CHECK-LABEL: @negative_WC_required(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1937,8 +1937,8 @@ define i32 @negative_WC_required(i32* %array, i32 %length, i32 %n, i1 %unrelated
; CHECK-NEXT: ret i32 [[DEOPTCALL]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]], !prof [[PROF1]]
@@ -1966,8 +1966,8 @@ deopt:
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- store i32 0, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ store i32 0, ptr %array.i.ptr, align 4
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
br i1 %continue, label %loop, label %exit, !prof !2
@@ -1976,7 +1976,7 @@ exit: ; preds = %guarded, %entry
ret i32 0
}
-define i32 @swapped_wb(i32* %array, i32 %length, i32 %n) {
+define i32 @swapped_wb(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @swapped_wb(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -1999,8 +1999,8 @@ define i32 @swapped_wb(i32* %array, i32 %length, i32 %n) {
; CHECK: guarded:
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -2033,8 +2033,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
diff --git a/llvm/test/Transforms/LoopPredication/invariant_load.ll b/llvm/test/Transforms/LoopPredication/invariant_load.ll
index bc77bd31e94eb..b795bd0788d4c 100644
--- a/llvm/test/Transforms/LoopPredication/invariant_load.ll
+++ b/llvm/test/Transforms/LoopPredication/invariant_load.ll
@@ -6,7 +6,7 @@ declare void @llvm.experimental.guard(i1, ...)
@UNKNOWN = external global i1
-define i32 @neg_length_variant(i32* %array, i32* %length, i32 %n) {
+define i32 @neg_length_variant(ptr %array, ptr %length, i32 %n) {
; CHECK-LABEL: @neg_length_variant(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -16,14 +16,14 @@ define i32 @neg_length_variant(i32* %array, i32* %length, i32 %n) {
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH:%.*]], align 4
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -45,15 +45,15 @@ loop.preheader:
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
- %len = load i32, i32* %length, align 4
+ %len = load i32, ptr %length, align 4
%within.bounds = icmp ult i32 %i, %len
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -65,7 +65,7 @@ exit:
ret i32 %result
}
-define i32 @invariant_load_guard_limit(i32* %array, i32* %length, i32 %n) {
+define i32 @invariant_load_guard_limit(ptr %array, ptr %length, i32 %n) {
; CHECK-LABEL: @invariant_load_guard_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -75,9 +75,9 @@ define i32 @invariant_load_guard_limit(i32* %array, i32* %length, i32 %n) {
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i32 [[N]], [[LEN]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 0, [[LEN]]
@@ -85,8 +85,8 @@ define i32 @invariant_load_guard_limit(i32* %array, i32* %length, i32 %n) {
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -108,15 +108,15 @@ loop.preheader:
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
- %len = load i32, i32* %length, align 4, !invariant.load !{}
+ %len = load i32, ptr %length, align 4, !invariant.load !{}
%within.bounds = icmp ult i32 %i, %len
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -130,7 +130,7 @@ exit:
; Case where we have an invariant load, but it's not loading from a loop
; invariant location.
-define i32 @neg_varying_invariant_load_op(i32* %array, i32* %lengths, i32 %n) {
+define i32 @neg_varying_invariant_load_op(ptr %array, ptr %lengths, i32 %n) {
; CHECK-LABEL: @neg_varying_invariant_load_op(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -140,15 +140,15 @@ define i32 @neg_varying_invariant_load_op(i32* %array, i32* %lengths, i32 %n) {
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
-; CHECK-NEXT: [[LENGTH_ADDR:%.*]] = getelementptr i32, i32* [[LENGTHS:%.*]], i32 [[I]]
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH_ADDR]], align 4, !invariant.load !0
+; CHECK-NEXT: [[LENGTH_ADDR:%.*]] = getelementptr i32, ptr [[LENGTHS:%.*]], i32 [[I]]
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH_ADDR]], align 4, !invariant.load !0
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -170,17 +170,17 @@ loop.preheader:
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
- %length.addr = getelementptr i32, i32* %lengths, i32 %i
- %len = load i32, i32* %length.addr, align 4, !invariant.load !{}
+ %length.addr = getelementptr i32, ptr %lengths, i32 %i
+ %len = load i32, ptr %length.addr, align 4, !invariant.load !{}
%within.bounds = icmp ult i32 %i, %len
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -195,7 +195,7 @@ exit:
; This is a case where moving the load which provides the limit for the latch
; would be invalid, so we can't preform the tempting transform. Loading the
; latch limit may fault since we could always fail the guard.
-define i32 @neg_invariant_load_latch_limit(i32* %array, i32* %length, i32 %n) {
+define i32 @neg_invariant_load_latch_limit(ptr %array, ptr %length, i32 %n) {
; CHECK-LABEL: @neg_invariant_load_latch_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -205,16 +205,16 @@ define i32 @neg_invariant_load_latch_limit(i32* %array, i32* %length, i32 %n) {
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[N]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[LEN]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
@@ -234,18 +234,18 @@ loop.preheader:
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
%within.bounds = icmp ult i32 %i, %n
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
- %len = load i32, i32* %length, align 4, !invariant.load !{}
+ %len = load i32, ptr %length, align 4, !invariant.load !{}
%continue = icmp ult i32 %i.next, %len
br i1 %continue, label %loop, label %exit
@@ -254,7 +254,7 @@ exit:
ret i32 %result
}
-define i32 @invariant_load_latch_limit(i32* %array,
+define i32 @invariant_load_latch_limit(ptr %array,
; CHECK-LABEL: @invariant_load_latch_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -264,16 +264,16 @@ define i32 @invariant_load_latch_limit(i32* %array,
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[N]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4, !invariant.load !0
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr [[LENGTH:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[LEN]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
; CHECK: exit.loopexit:
@@ -283,7 +283,7 @@ define i32 @invariant_load_latch_limit(i32* %array,
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RESULT]]
;
- i32* dereferenceable(4) %length,
+ ptr dereferenceable(4) %length,
i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
@@ -295,18 +295,18 @@ loop.preheader:
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
%within.bounds = icmp ult i32 %i, %n
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
- %len = load i32, i32* %length, align 4, !invariant.load !{}
+ %len = load i32, ptr %length, align 4, !invariant.load !{}
%continue = icmp ult i32 %i.next, %len
br i1 %continue, label %loop, label %exit
@@ -319,7 +319,7 @@ exit:
@Length = external constant i32
-define i32 @constant_memory(i32* %array, i32 %n) {
+define i32 @constant_memory(ptr %array, i32 %n) {
; CHECK-LABEL: @constant_memory(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -329,9 +329,9 @@ define i32 @constant_memory(i32* %array, i32 %n) {
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
-; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* @Length, align 4
+; CHECK-NEXT: [[LEN:%.*]] = load i32, ptr @Length, align 4
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i32 [[N]], [[LEN]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 0, [[LEN]]
@@ -339,8 +339,8 @@ define i32 @constant_memory(i32* %array, i32 %n) {
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -362,15 +362,15 @@ loop.preheader:
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
- %len = load i32, i32* @Length, align 4
+ %len = load i32, ptr @Length, align 4
%within.bounds = icmp ult i32 %i, %len
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
@@ -382,7 +382,7 @@ exit:
ret i32 %result
}
-define i32 @constant_length(i32* %array, i32 %n) {
+define i32 @constant_length(ptr %array, i32 %n) {
; CHECK-LABEL: @constant_length(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -394,14 +394,14 @@ define i32 @constant_length(i32* %array, i32 %n) {
; CHECK: loop:
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN, align 1
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, ptr @UNKNOWN, align 1
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], 20
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP1]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -423,14 +423,14 @@ loop.preheader:
loop:
%loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
- %unknown = load volatile i1, i1* @UNKNOWN
+ %unknown = load volatile i1, ptr @UNKNOWN
call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
%within.bounds = icmp ult i32 %i, 20
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
diff --git a/llvm/test/Transforms/LoopPredication/nested.ll b/llvm/test/Transforms/LoopPredication/nested.ll
index 091f26d1be32d..21d859ff402b4 100644
--- a/llvm/test/Transforms/LoopPredication/nested.ll
+++ b/llvm/test/Transforms/LoopPredication/nested.ll
@@ -4,7 +4,7 @@
declare void @llvm.experimental.guard(i1, ...)
-define i32 @signed_loop_0_to_n_nested_0_to_l_inner_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+define i32 @signed_loop_0_to_n_nested_0_to_l_inner_index_check(ptr %array, i32 %length, i32 %n, i32 %l) {
; CHECK-LABEL: @signed_loop_0_to_n_nested_0_to_l_inner_index_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -28,8 +28,8 @@ define i32 @signed_loop_0_to_n_nested_0_to_l_inner_index_check(i32* %array, i32
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[J_I64:%.*]] = zext i32 [[J]] to i64
-; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[J_I64]]
-; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, i32* [[ARRAY_J_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[J_I64]]
+; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, ptr [[ARRAY_J_PTR]], align 4
; CHECK-NEXT: [[INNER_LOOP_ACC_NEXT]] = add i32 [[INNER_LOOP_ACC]], [[ARRAY_J]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[INNER_CONTINUE:%.*]] = icmp slt i32 [[J_NEXT]], [[L]]
@@ -73,8 +73,8 @@ inner.loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%j.i64 = zext i32 %j to i64
- %array.j.ptr = getelementptr inbounds i32, i32* %array, i64 %j.i64
- %array.j = load i32, i32* %array.j.ptr, align 4
+ %array.j.ptr = getelementptr inbounds i32, ptr %array, i64 %j.i64
+ %array.j = load i32, ptr %array.j.ptr, align 4
%inner.loop.acc.next = add i32 %inner.loop.acc, %array.j
%j.next = add nsw i32 %j, 1
@@ -92,7 +92,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_nested_0_to_l_outer_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+define i32 @signed_loop_0_to_n_nested_0_to_l_outer_index_check(ptr %array, i32 %length, i32 %n, i32 %l) {
; CHECK-LABEL: @signed_loop_0_to_n_nested_0_to_l_outer_index_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -116,8 +116,8 @@ define i32 @signed_loop_0_to_n_nested_0_to_l_outer_index_check(i32* %array, i32
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[INNER_LOOP_ACC_NEXT]] = add i32 [[INNER_LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[INNER_CONTINUE:%.*]] = icmp slt i32 [[J_NEXT]], [[L]]
@@ -162,8 +162,8 @@ inner.loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%inner.loop.acc.next = add i32 %inner.loop.acc, %array.i
%j.next = add nsw i32 %j, 1
@@ -181,7 +181,7 @@ exit:
ret i32 %result
}
-define i32 @signed_loop_0_to_n_nested_i_to_l_inner_index_check(i32* %array, i32 %length, i32 %n, i32 %l) {
+define i32 @signed_loop_0_to_n_nested_i_to_l_inner_index_check(ptr %array, i32 %length, i32 %n, i32 %l) {
; CHECK-LABEL: @signed_loop_0_to_n_nested_i_to_l_inner_index_check(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -210,8 +210,8 @@ define i32 @signed_loop_0_to_n_nested_i_to_l_inner_index_check(i32* %array, i32
; CHECK-NEXT: call void @llvm.assume(i1 [[TMP5]])
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[J_I64:%.*]] = zext i32 [[J]] to i64
-; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[J_I64]]
-; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, i32* [[ARRAY_J_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[J_I64]]
+; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, ptr [[ARRAY_J_PTR]], align 4
; CHECK-NEXT: [[INNER_LOOP_ACC_NEXT]] = add i32 [[INNER_LOOP_ACC]], [[ARRAY_J]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[INNER_CONTINUE:%.*]] = icmp slt i32 [[J_NEXT]], [[L]]
@@ -255,8 +255,8 @@ inner.loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%j.i64 = zext i32 %j to i64
- %array.j.ptr = getelementptr inbounds i32, i32* %array, i64 %j.i64
- %array.j = load i32, i32* %array.j.ptr, align 4
+ %array.j.ptr = getelementptr inbounds i32, ptr %array, i64 %j.i64
+ %array.j = load i32, ptr %array.j.ptr, align 4
%inner.loop.acc.next = add i32 %inner.loop.acc, %array.j
%j.next = add nsw i32 %j, 1
@@ -274,7 +274,7 @@ exit:
ret i32 %result
}
-define i32 @cant_expand_guard_check_start(i32* %array, i32 %length, i32 %n, i32 %l, i32 %maybezero) {
+define i32 @cant_expand_guard_check_start(ptr %array, i32 %length, i32 %n, i32 %l, i32 %maybezero) {
; CHECK-LABEL: @cant_expand_guard_check_start(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp sle i32 [[N:%.*]], 0
@@ -295,8 +295,8 @@ define i32 @cant_expand_guard_check_start(i32* %array, i32 %length, i32 %n, i32
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[J]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[J_I64:%.*]] = zext i32 [[J]] to i64
-; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[J_I64]]
-; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, i32* [[ARRAY_J_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_J_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[J_I64]]
+; CHECK-NEXT: [[ARRAY_J:%.*]] = load i32, ptr [[ARRAY_J_PTR]], align 4
; CHECK-NEXT: [[INNER_LOOP_ACC_NEXT]] = add i32 [[INNER_LOOP_ACC]], [[ARRAY_J]]
; CHECK-NEXT: [[J_NEXT]] = add nsw i32 [[J]], 1
; CHECK-NEXT: [[INNER_CONTINUE:%.*]] = icmp slt i32 [[J_NEXT]], [[L]]
@@ -341,8 +341,8 @@ inner.loop:
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%j.i64 = zext i32 %j to i64
- %array.j.ptr = getelementptr inbounds i32, i32* %array, i64 %j.i64
- %array.j = load i32, i32* %array.j.ptr, align 4
+ %array.j.ptr = getelementptr inbounds i32, ptr %array, i64 %j.i64
+ %array.j = load i32, ptr %array.j.ptr, align 4
%inner.loop.acc.next = add i32 %inner.loop.acc, %array.j
%j.next = add nsw i32 %j, 1
diff --git a/llvm/test/Transforms/LoopPredication/predicate-exits.ll b/llvm/test/Transforms/LoopPredication/predicate-exits.ll
index 17c64524e028f..fb3c64a031926 100644
--- a/llvm/test/Transforms/LoopPredication/predicate-exits.ll
+++ b/llvm/test/Transforms/LoopPredication/predicate-exits.ll
@@ -5,7 +5,7 @@
declare void @prevent_merging()
; Base case - with side effects in loop
-define i32 @test1(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @test1(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -34,9 +34,9 @@ define i32 @test1(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -71,9 +71,9 @@ deopt2:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -86,7 +86,7 @@ exit:
-define i32 @test_non_canonical(i32* %array, i32 %length, i1 %cond_0) {
+define i32 @test_non_canonical(ptr %array, i32 %length, i1 %cond_0) {
; CHECK-LABEL: @test_non_canonical(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -115,9 +115,9 @@ define i32 @test_non_canonical(i32* %array, i32 %length, i1 %cond_0) {
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[LENGTH]]
@@ -152,9 +152,9 @@ deopt2:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %length
@@ -166,7 +166,7 @@ exit:
}
-define i32 @test_two_range_checks(i32* %array, i32 %length.1, i32 %length.2, i32 %n, i1 %cond_0) {
+define i32 @test_two_range_checks(ptr %array, i32 %length.1, i32 %length.2, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test_two_range_checks(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -206,9 +206,9 @@ define i32 @test_two_range_checks(i32* %array, i32 %length.1, i32 %length.2, i32
; CHECK-NEXT: ret i32 [[DEOPTRET3]]
; CHECK: guarded2:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -252,9 +252,9 @@ deopt3:
guarded2:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -267,7 +267,7 @@ exit:
@G = external global i32
-define i32 @test_unanalyzeable_exit(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @test_unanalyzeable_exit(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test_unanalyzeable_exit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -282,7 +282,7 @@ define i32 @test_unanalyzeable_exit(i32* %array, i32 %length, i32 %n, i1 %cond_0
; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[GUARDED2:%.*]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[GUARDED2]] ], [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: call void @unknown()
-; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, i32* @G, align 4
+; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, ptr @G, align 4
; CHECK-NEXT: [[UNKNOWN:%.*]] = icmp eq i32 [[VOL]], 0
; CHECK-NEXT: br i1 [[UNKNOWN]], label [[GUARDED2]], label [[DEOPT3:%.*]], !prof !0
; CHECK: deopt3:
@@ -291,9 +291,9 @@ define i32 @test_unanalyzeable_exit(i32* %array, i32 %length, i32 %n, i1 %cond_0
; CHECK-NEXT: ret i32 [[DEOPTRET3]]
; CHECK: guarded2:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N:%.*]]
@@ -318,7 +318,7 @@ loop:
%loop.acc = phi i32 [ %loop.acc.next, %guarded2 ], [ 0, %loop.preheader ]
%i = phi i32 [ %i.next, %guarded2 ], [ 0, %loop.preheader ]
call void @unknown()
- %vol = load volatile i32, i32* @G
+ %vol = load volatile i32, ptr @G
%unknown = icmp eq i32 %vol, 0
br i1 %unknown, label %guarded2, label %deopt3, !prof !0
@@ -329,9 +329,9 @@ deopt3:
guarded2:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -342,7 +342,7 @@ exit:
ret i32 %result
}
-define i32 @test_unanalyzeable_exit2(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @test_unanalyzeable_exit2(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test_unanalyzeable_exit2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -370,7 +370,7 @@ define i32 @test_unanalyzeable_exit2(i32* %array, i32 %length, i32 %n, i1 %cond_
; CHECK-NEXT: [[DEOPTRET2:%.*]] = call i32 (...) @llvm.experimental.deoptimize.i32() [ "deopt"() ]
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
-; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, i32* @G, align 4
+; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, ptr @G, align 4
; CHECK-NEXT: [[UNKNOWN:%.*]] = icmp eq i32 [[VOL]], 0
; CHECK-NEXT: br i1 [[UNKNOWN]], label [[GUARDED2]], label [[DEOPT3:%.*]], !prof !0
; CHECK: deopt3:
@@ -379,9 +379,9 @@ define i32 @test_unanalyzeable_exit2(i32* %array, i32 %length, i32 %n, i1 %cond_
; CHECK-NEXT: ret i32 [[DEOPTRET3]]
; CHECK: guarded2:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -415,7 +415,7 @@ deopt2:
ret i32 %deoptret2
guarded:
- %vol = load volatile i32, i32* @G
+ %vol = load volatile i32, ptr @G
%unknown = icmp eq i32 %vol, 0
br i1 %unknown, label %guarded2, label %deopt3, !prof !0
@@ -426,9 +426,9 @@ deopt3:
guarded2:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -440,7 +440,7 @@ exit:
}
-define i32 @test_unanalyzeable_latch(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @test_unanalyzeable_latch(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @test_unanalyzeable_latch(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -463,12 +463,12 @@ define i32 @test_unanalyzeable_latch(i32* %array, i32 %length, i32 %n, i1 %cond_
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
-; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, i32* @G, align 4
+; CHECK-NEXT: [[VOL:%.*]] = load volatile i32, ptr @G, align 4
; CHECK-NEXT: [[UNKNOWN:%.*]] = icmp eq i32 [[VOL]], 0
; CHECK-NEXT: br i1 [[UNKNOWN]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
@@ -501,12 +501,12 @@ deopt2:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
- %vol = load volatile i32, i32* @G
+ %vol = load volatile i32, ptr @G
%unknown = icmp eq i32 %vol, 0
br i1 %unknown, label %loop, label %exit
@@ -516,7 +516,7 @@ exit:
}
-define i32 @provably_taken(i32* %array, i1 %cond_0) {
+define i32 @provably_taken(ptr %array, i1 %cond_0) {
; CHECK-LABEL: @provably_taken(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -541,9 +541,9 @@ define i32 @provably_taken(i32* %array, i1 %cond_0) {
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], 200
@@ -578,9 +578,9 @@ deopt2:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, 200
@@ -591,7 +591,7 @@ exit:
ret i32 %result
}
-define i32 @provably_not_taken(i32* %array, i1 %cond_0) {
+define i32 @provably_not_taken(ptr %array, i1 %cond_0) {
; CHECK-LABEL: @provably_not_taken(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -616,9 +616,9 @@ define i32 @provably_not_taken(i32* %array, i1 %cond_0) {
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], 200
@@ -653,9 +653,9 @@ deopt2:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, 200
@@ -669,7 +669,7 @@ exit:
;; Unswitch likes to produce some ugly exit blocks without simplifications
;; being applied. Make sure we can handle that form.
-define i32 @unswitch_exit_form(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @unswitch_exit_form(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @unswitch_exit_form(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -700,9 +700,9 @@ define i32 @unswitch_exit_form(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-NEXT: br i1 true, label [[GUARDED]], label [[DEOPT_LOOPEXIT]], !prof !0
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -739,9 +739,9 @@ loop:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -752,7 +752,7 @@ exit:
ret i32 %result
}
-define i32 @swapped_wb(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @swapped_wb(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @swapped_wb(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -781,9 +781,9 @@ define i32 @swapped_wb(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -818,9 +818,9 @@ deopt2:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -831,7 +831,7 @@ exit:
ret i32 %result
}
-define i32 @trivial_wb(i32* %array, i32 %length, i32 %n) {
+define i32 @trivial_wb(ptr %array, i32 %length, i32 %n) {
; CHECK-LABEL: @trivial_wb(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
@@ -859,9 +859,9 @@ define i32 @trivial_wb(i32* %array, i32 %length, i32 %n) {
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -895,9 +895,9 @@ deopt2:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
@@ -910,7 +910,7 @@ exit:
; TODO: Non-latch exits can still be predicated
; This is currently prevented by an overly restrictive profitability check.
-define i32 @todo_unconditional_latch(i32* %array, i32 %length, i1 %cond_0) {
+define i32 @todo_unconditional_latch(ptr %array, i32 %length, i1 %cond_0) {
; CHECK-LABEL: @todo_unconditional_latch(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -933,9 +933,9 @@ define i32 @todo_unconditional_latch(i32* %array, i32 %length, i1 %cond_0) {
; CHECK-NEXT: ret i32 [[DEOPTRET2]]
; CHECK: guarded:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: br label [[LOOP]]
@@ -966,9 +966,9 @@ deopt2:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
br label %loop
@@ -978,7 +978,7 @@ guarded:
; If we have a stray widenable branch in the loop, we should still be able to
; run. This can happen when unswitching's cost model avoids unswitching some
; branches.
-define i32 @wb_in_loop(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @wb_in_loop(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @wb_in_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDENABLE_COND:%.*]] = call i1 @llvm.experimental.widenable.condition()
@@ -1020,9 +1020,9 @@ define i32 @wb_in_loop(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-NEXT: ret i32 [[DEOPTRET3]]
; CHECK: guarded2:
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
-; CHECK-NEXT: store i32 0, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: store i32 0, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -1069,9 +1069,9 @@ deopt3:
guarded2:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
diff --git a/llvm/test/Transforms/LoopPredication/preserve-bpi.ll b/llvm/test/Transforms/LoopPredication/preserve-bpi.ll
index f87184cfdbdb4..7fbb19783f464 100644
--- a/llvm/test/Transforms/LoopPredication/preserve-bpi.ll
+++ b/llvm/test/Transforms/LoopPredication/preserve-bpi.ll
@@ -16,7 +16,7 @@ declare void @llvm.experimental.guard(i1, ...)
; CHECK-NEXT: Running pass: SimpleLoopUnswitchPass on loop
; CHECK-NEXT: Running pass: LoopSimplifyCFGPass on loop
-define i32 @unsigned_loop_0_to_n_ult_check(i32* %array, i32 %length, i32 %n) {
+define i32 @unsigned_loop_0_to_n_ult_check(ptr %array, i32 %length, i32 %n) {
entry:
%tmp5 = icmp eq i32 %n, 0
br i1 %tmp5, label %exit, label %loop.preheader
@@ -38,8 +38,8 @@ deopt: ; preds = %loop
guarded: ; preds = %loop
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
diff --git a/llvm/test/Transforms/LoopPredication/profitability.ll b/llvm/test/Transforms/LoopPredication/profitability.ll
index c9c93977c96de..0a61ceeceee77 100644
--- a/llvm/test/Transforms/LoopPredication/profitability.ll
+++ b/llvm/test/Transforms/LoopPredication/profitability.ll
@@ -6,14 +6,14 @@
; check.
; LatchExitProbability: 0x04000000 / 0x80000000 = 3.12%
; ExitingBlockProbability: 0x7ffa572a / 0x80000000 = 99.98%
-define i64 @donot_predicate(i64* nocapture readonly %arg, i32 %length, i64* nocapture readonly %arg2, i64* nocapture readonly %n_addr, i64 %i) !prof !21 {
+define i64 @donot_predicate(ptr nocapture readonly %arg, i32 %length, ptr nocapture readonly %arg2, ptr nocapture readonly %n_addr, i64 %i) !prof !21 {
; CHECK-LABEL: @donot_predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LENGTH_EXT:%.*]] = zext i32 [[LENGTH:%.*]] to i64
-; CHECK-NEXT: [[N_PRE:%.*]] = load i64, i64* [[N_ADDR:%.*]], align 4
+; CHECK-NEXT: [[N_PRE:%.*]] = load i64, ptr [[N_ADDR:%.*]], align 4
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: Header:
-; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi i64* [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi ptr [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[J2:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[J_NEXT:%.*]], [[LATCH]] ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i64 [[J2]], [[LENGTH_EXT]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
@@ -27,17 +27,17 @@ define i64 @donot_predicate(i64* nocapture readonly %arg, i32 %length, i64* noca
; CHECK-NEXT: [[COUNTED_SPECULATION_FAILED:%.*]] = call i64 (...) @llvm.experimental.deoptimize.i64(i64 30) [ "deopt"(i32 0) ]
; CHECK-NEXT: ret i64 [[COUNTED_SPECULATION_FAILED]]
; CHECK: exit:
-; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi i64* [ [[RESULT_IN3]], [[HEADER]] ]
-; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, i64* [[RESULT_IN3_LCSSA]], align 8
+; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi ptr [ [[RESULT_IN3]], [[HEADER]] ]
+; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, ptr [[RESULT_IN3_LCSSA]], align 8
; CHECK-NEXT: ret i64 [[RESULT_LE]]
;
entry:
%length.ext = zext i32 %length to i64
- %n.pre = load i64, i64* %n_addr, align 4
+ %n.pre = load i64, ptr %n_addr, align 4
br label %Header
Header: ; preds = %entry, %Latch
- %result.in3 = phi i64* [ %arg2, %entry ], [ %arg, %Latch ]
+ %result.in3 = phi ptr [ %arg2, %entry ], [ %arg, %Latch ]
%j2 = phi i64 [ 0, %entry ], [ %j.next, %Latch ]
%within.bounds = icmp ult i64 %j2, %length.ext
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
@@ -54,25 +54,25 @@ deopt: ; preds = %Latch
ret i64 %counted_speculation_failed
exit: ; preds = %Header
- %result.in3.lcssa = phi i64* [ %result.in3, %Header ]
- %result.le = load i64, i64* %result.in3.lcssa, align 8
+ %result.in3.lcssa = phi ptr [ %result.in3, %Header ]
+ %result.le = load i64, ptr %result.in3.lcssa, align 8
ret i64 %result.le
}
!0 = !{!"branch_weights", i32 18, i32 104200}
; predicate loop since there's no profile information and BPI concluded all
; exiting blocks have same probability of exiting from loop.
-define i64 @predicate(i64* nocapture readonly %arg, i32 %length, i64* nocapture readonly %arg2, i64* nocapture readonly %n_addr, i64 %i) !prof !21 {
+define i64 @predicate(ptr nocapture readonly %arg, i32 %length, ptr nocapture readonly %arg2, ptr nocapture readonly %n_addr, i64 %i) !prof !21 {
; CHECK-LABEL: @predicate(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LENGTH_EXT:%.*]] = zext i32 [[LENGTH:%.*]] to i64
-; CHECK-NEXT: [[N_PRE:%.*]] = load i64, i64* [[N_ADDR:%.*]], align 4
+; CHECK-NEXT: [[N_PRE:%.*]] = load i64, ptr [[N_ADDR:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i64 1048576, [[LENGTH_EXT]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 0, [[LENGTH_EXT]]
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[TMP0]]
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: Header:
-; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi i64* [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi ptr [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[J2:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[J_NEXT:%.*]], [[LATCH]] ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i64 [[J2]], [[LENGTH_EXT]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
@@ -86,17 +86,17 @@ define i64 @predicate(i64* nocapture readonly %arg, i32 %length, i64* nocapture
; CHECK: exitLatch:
; CHECK-NEXT: ret i64 1
; CHECK: exit:
-; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi i64* [ [[RESULT_IN3]], [[HEADER]] ]
-; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, i64* [[RESULT_IN3_LCSSA]], align 8
+; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi ptr [ [[RESULT_IN3]], [[HEADER]] ]
+; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, ptr [[RESULT_IN3_LCSSA]], align 8
; CHECK-NEXT: ret i64 [[RESULT_LE]]
;
entry:
%length.ext = zext i32 %length to i64
- %n.pre = load i64, i64* %n_addr, align 4
+ %n.pre = load i64, ptr %n_addr, align 4
br label %Header
Header: ; preds = %entry, %Latch
- %result.in3 = phi i64* [ %arg2, %entry ], [ %arg, %Latch ]
+ %result.in3 = phi ptr [ %arg2, %entry ], [ %arg, %Latch ]
%j2 = phi i64 [ 0, %entry ], [ %j.next, %Latch ]
%within.bounds = icmp ult i64 %j2, %length.ext
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
@@ -112,8 +112,8 @@ exitLatch: ; preds = %Latch
ret i64 1
exit: ; preds = %Header
- %result.in3.lcssa = phi i64* [ %result.in3, %Header ]
- %result.le = load i64, i64* %result.in3.lcssa, align 8
+ %result.in3.lcssa = phi ptr [ %result.in3, %Header ]
+ %result.le = load i64, ptr %result.in3.lcssa, align 8
ret i64 %result.le
}
@@ -121,14 +121,14 @@ exit: ; preds = %Header
; the loop is the header exiting block (not the latch block). So do not predicate.
; LatchExitProbability: 0x000020e1 / 0x80000000 = 0.00%
; ExitingBlockProbability: 0x7ffcbb86 / 0x80000000 = 99.99%
-define i64 @donot_predicate_prof(i64* nocapture readonly %arg, i32 %length, i64* nocapture readonly %arg2, i64* nocapture readonly %n_addr, i64 %i) !prof !21 {
+define i64 @donot_predicate_prof(ptr nocapture readonly %arg, i32 %length, ptr nocapture readonly %arg2, ptr nocapture readonly %n_addr, i64 %i) !prof !21 {
; CHECK-LABEL: @donot_predicate_prof(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LENGTH_EXT:%.*]] = zext i32 [[LENGTH:%.*]] to i64
-; CHECK-NEXT: [[N_PRE:%.*]] = load i64, i64* [[N_ADDR:%.*]], align 4
+; CHECK-NEXT: [[N_PRE:%.*]] = load i64, ptr [[N_ADDR:%.*]], align 4
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: Header:
-; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi i64* [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[RESULT_IN3:%.*]] = phi ptr [ [[ARG2:%.*]], [[ENTRY:%.*]] ], [ [[ARG:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[J2:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[J_NEXT:%.*]], [[LATCH]] ]
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i64 [[J2]], [[LENGTH_EXT]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
@@ -141,17 +141,17 @@ define i64 @donot_predicate_prof(i64* nocapture readonly %arg, i32 %length, i64*
; CHECK: exitLatch:
; CHECK-NEXT: ret i64 1
; CHECK: exit:
-; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi i64* [ [[RESULT_IN3]], [[HEADER]] ]
-; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, i64* [[RESULT_IN3_LCSSA]], align 8
+; CHECK-NEXT: [[RESULT_IN3_LCSSA:%.*]] = phi ptr [ [[RESULT_IN3]], [[HEADER]] ]
+; CHECK-NEXT: [[RESULT_LE:%.*]] = load i64, ptr [[RESULT_IN3_LCSSA]], align 8
; CHECK-NEXT: ret i64 [[RESULT_LE]]
;
entry:
%length.ext = zext i32 %length to i64
- %n.pre = load i64, i64* %n_addr, align 4
+ %n.pre = load i64, ptr %n_addr, align 4
br label %Header
Header: ; preds = %entry, %Latch
- %result.in3 = phi i64* [ %arg2, %entry ], [ %arg, %Latch ]
+ %result.in3 = phi ptr [ %arg2, %entry ], [ %arg, %Latch ]
%j2 = phi i64 [ 0, %entry ], [ %j.next, %Latch ]
%within.bounds = icmp ult i64 %j2, %length.ext
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
@@ -167,8 +167,8 @@ exitLatch: ; preds = %Latch
ret i64 1
exit: ; preds = %Header
- %result.in3.lcssa = phi i64* [ %result.in3, %Header ]
- %result.le = load i64, i64* %result.in3.lcssa, align 8
+ %result.in3.lcssa = phi ptr [ %result.in3, %Header ]
+ %result.le = load i64, ptr %result.in3.lcssa, align 8
ret i64 %result.le
}
declare i64 @llvm.experimental.deoptimize.i64(...)
diff --git a/llvm/test/Transforms/LoopPredication/reverse.ll b/llvm/test/Transforms/LoopPredication/reverse.ll
index 105b9056ed132..968069ccc0f5d 100644
--- a/llvm/test/Transforms/LoopPredication/reverse.ll
+++ b/llvm/test/Transforms/LoopPredication/reverse.ll
@@ -4,7 +4,7 @@
declare void @llvm.experimental.guard(i1, ...)
-define i32 @signed_reverse_loop_n_to_lower_limit(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @signed_reverse_loop_n_to_lower_limit(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @signed_reverse_loop_n_to_lower_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -23,8 +23,8 @@ define i32 @signed_reverse_loop_n_to_lower_limit(i32* %array, i32 %length, i32 %
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sgt i32 [[I]], [[LOWERLIMIT]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
@@ -49,8 +49,8 @@ loop:
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp sgt i32 %i, %lowerlimit
br i1 %continue, label %loop, label %exit
@@ -60,7 +60,7 @@ exit:
ret i32 %result
}
-define i32 @unsigned_reverse_loop_n_to_lower_limit(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @unsigned_reverse_loop_n_to_lower_limit(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @unsigned_reverse_loop_n_to_lower_limit(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -79,8 +79,8 @@ define i32 @unsigned_reverse_loop_n_to_lower_limit(i32* %array, i32 %length, i32
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ugt i32 [[I]], [[LOWERLIMIT]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
@@ -105,8 +105,8 @@ loop:
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp ugt i32 %i, %lowerlimit
br i1 %continue, label %loop, label %exit
@@ -119,7 +119,7 @@ exit:
; if we predicated the loop, the guard will definitely fail and we will
; deoptimize early on.
-define i32 @unsigned_reverse_loop_n_to_0(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @unsigned_reverse_loop_n_to_0(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @unsigned_reverse_loop_n_to_0(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -137,8 +137,8 @@ define i32 @unsigned_reverse_loop_n_to_0(i32* %array, i32 %length, i32 %n, i32 %
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ugt i32 [[I]], 0
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
@@ -163,8 +163,8 @@ loop:
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp ugt i32 %i, 0
br i1 %continue, label %loop, label %exit
@@ -175,7 +175,7 @@ exit:
}
; do not loop predicate when the range has step -1 and latch has step 1.
-define i32 @reverse_loop_range_step_increment(i32 %n, i32* %array, i32 %length) {
+define i32 @reverse_loop_range_step_increment(i32 %n, ptr %array, i32 %length) {
; CHECK-LABEL: @reverse_loop_range_step_increment(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -190,8 +190,8 @@ define i32 @reverse_loop_range_step_increment(i32 %n, i32* %array, i32 %length)
; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[IRC]], [[LENGTH:%.*]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[IRC]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], -1
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ugt i32 [[I]], 65534
@@ -218,8 +218,8 @@ loop:
%within.bounds = icmp ult i32 %irc, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %irc to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%i.next = add nsw i32 %i, -1
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp ugt i32 %i, 65534
@@ -230,7 +230,7 @@ exit:
ret i32 %result
}
-define i32 @signed_reverse_loop_n_to_lower_limit_equal(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @signed_reverse_loop_n_to_lower_limit_equal(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @signed_reverse_loop_n_to_lower_limit_equal(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -249,8 +249,8 @@ define i32 @signed_reverse_loop_n_to_lower_limit_equal(i32* %array, i32 %length,
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp sge i32 [[I]], [[LOWERLIMIT]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
@@ -275,8 +275,8 @@ loop:
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp sge i32 %i, %lowerlimit
br i1 %continue, label %loop, label %exit
@@ -286,7 +286,7 @@ exit:
ret i32 %result
}
-define i32 @unsigned_reverse_loop_n_to_lower_limit_equal(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @unsigned_reverse_loop_n_to_lower_limit_equal(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @unsigned_reverse_loop_n_to_lower_limit_equal(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -305,8 +305,8 @@ define i32 @unsigned_reverse_loop_n_to_lower_limit_equal(i32* %array, i32 %lengt
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp uge i32 [[I]], [[LOWERLIMIT]]
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
@@ -331,8 +331,8 @@ loop:
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp uge i32 %i, %lowerlimit
br i1 %continue, label %loop, label %exit
@@ -345,7 +345,7 @@ exit:
; if we predicated the loop, the guard will definitely fail and we will
; deoptimize early on.
-define i32 @unsigned_reverse_loop_n_to_1(i32* %array, i32 %length, i32 %n, i32 %lowerlimit) {
+define i32 @unsigned_reverse_loop_n_to_1(ptr %array, i32 %length, i32 %n, i32 %lowerlimit) {
; CHECK-LABEL: @unsigned_reverse_loop_n_to_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -363,8 +363,8 @@ define i32 @unsigned_reverse_loop_n_to_1(i32* %array, i32 %length, i32 %n, i32 %
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WITHIN_BOUNDS]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I_NEXT]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp uge i32 [[I]], 1
; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
@@ -389,8 +389,8 @@ loop:
%within.bounds = icmp ult i32 %i.next, %length
call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i.next to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%continue = icmp uge i32 %i, 1
br i1 %continue, label %loop, label %exit
diff --git a/llvm/test/Transforms/LoopPredication/unswitch-exit-loop.ll b/llvm/test/Transforms/LoopPredication/unswitch-exit-loop.ll
index ba9665e73b52e..36d5a97749f31 100644
--- a/llvm/test/Transforms/LoopPredication/unswitch-exit-loop.ll
+++ b/llvm/test/Transforms/LoopPredication/unswitch-exit-loop.ll
@@ -7,7 +7,7 @@
;; Error checking is rather silly here - it should pass compilation successfully,
;; in bad case it will just timeout.
;;
-define i32 @unswitch_exit_form_with_endless_loop(i32* %array, i32 %length, i32 %n, i1 %cond_0) {
+define i32 @unswitch_exit_form_with_endless_loop(ptr %array, i32 %length, i32 %n, i1 %cond_0) {
; CHECK-LABEL: @unswitch_exit_form_with_endless_loop
entry:
%widenable_cond = call i1 @llvm.experimental.widenable.condition()
@@ -33,9 +33,9 @@ loop:
guarded:
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
- store i32 0, i32* %array.i.ptr
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
+ store i32 0, ptr %array.i.ptr
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
%continue = icmp ult i32 %i.next, %n
diff --git a/llvm/test/Transforms/LoopPredication/visited.ll b/llvm/test/Transforms/LoopPredication/visited.ll
index 2c9f9051afd49..68471e75d01a6 100644
--- a/llvm/test/Transforms/LoopPredication/visited.ll
+++ b/llvm/test/Transforms/LoopPredication/visited.ll
@@ -4,7 +4,7 @@
declare void @llvm.experimental.guard(i1, ...)
-define i32 @test_visited(i32* %array, i32 %length, i32 %n, i32 %x) {
+define i32 @test_visited(ptr %array, i32 %length, i32 %n, i32 %x) {
; CHECK-LABEL: @test_visited(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
@@ -121,8 +121,8 @@ define i32 @test_visited(i32* %array, i32 %length, i32 %n, i32 %x) {
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP3]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[GUARD_COND_99]])
; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
-; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, ptr [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, ptr [[ARRAY_I_PTR]], align 4
; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
@@ -247,8 +247,8 @@ loop:
call void (i1, ...) @llvm.experimental.guard(i1 %guard.cond.99, i32 9) [ "deopt"() ]
%i.i64 = zext i32 %i to i64
- %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
- %array.i = load i32, i32* %array.i.ptr, align 4
+ %array.i.ptr = getelementptr inbounds i32, ptr %array, i64 %i.i64
+ %array.i = load i32, ptr %array.i.ptr, align 4
%loop.acc.next = add i32 %loop.acc, %array.i
%i.next = add nuw i32 %i, 1
diff --git a/llvm/test/Transforms/LoopPredication/widened.ll b/llvm/test/Transforms/LoopPredication/widened.ll
index 3cf37d1f2d3fe..a6f773ddec3a4 100644
--- a/llvm/test/Transforms/LoopPredication/widened.ll
+++ b/llvm/test/Transforms/LoopPredication/widened.ll
@@ -3,16 +3,16 @@
; RUN: opt -S -passes='require<scalar-evolution>,loop-mssa(loop-predication)' -verify-memoryssa < %s 2>&1 | FileCheck %s
declare void @llvm.experimental.guard(i1, ...)
-declare i32 @length(i8*)
+declare i32 @length(ptr)
-declare i16 @short_length(i8*)
+declare i16 @short_length(ptr)
; Consider range check of type i16 and i32, while IV is of type i64
; We can loop predicate this because the IV range is within i16 and within i32.
-define i64 @iv_wider_type_rc_two_narrow_types(i32 %offA, i16 %offB, i8* %arrA, i8* %arrB) {
+define i64 @iv_wider_type_rc_two_narrow_types(i32 %offA, i16 %offB, ptr %arrA, ptr %arrB) {
; CHECK-LABEL: @iv_wider_type_rc_two_narrow_types(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
-; CHECK-NEXT: [[LENGTHB:%.*]] = call i16 @short_length(i8* [[ARRB:%.*]])
+; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(ptr [[ARRA:%.*]])
+; CHECK-NEXT: [[LENGTHB:%.*]] = call i16 @short_length(ptr [[ARRB:%.*]])
; CHECK-NEXT: [[TMP0:%.*]] = sub i16 [[LENGTHB]], [[OFFB:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i16 16, [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i16 [[OFFB]], [[LENGTHB]]
@@ -35,11 +35,11 @@ define i64 @iv_wider_type_rc_two_narrow_types(i32 %offA, i16 %offB, i8* %arrA, i
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP8]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WIDE_CHK]])
; CHECK-NEXT: [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
-; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
-; CHECK-NEXT: [[ELTA:%.*]] = load i8, i8* [[ADDRA]], align 1
+; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, ptr [[ARRA]], i64 [[INDEXA_EXT]]
+; CHECK-NEXT: [[ELTA:%.*]] = load i8, ptr [[ADDRA]], align 1
; CHECK-NEXT: [[INDEXB_EXT:%.*]] = zext i16 [[INDEXB]] to i64
-; CHECK-NEXT: [[ADDRB:%.*]] = getelementptr inbounds i8, i8* [[ARRB]], i64 [[INDEXB_EXT]]
-; CHECK-NEXT: store i8 [[ELTA]], i8* [[ADDRB]], align 1
+; CHECK-NEXT: [[ADDRB:%.*]] = getelementptr inbounds i8, ptr [[ARRB]], i64 [[INDEXB_EXT]]
+; CHECK-NEXT: store i8 [[ELTA]], ptr [[ADDRB]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[LATCH_CHECK:%.*]] = icmp ult i64 [[IV_NEXT]], 16
; CHECK-NEXT: br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
@@ -48,8 +48,8 @@ define i64 @iv_wider_type_rc_two_narrow_types(i32 %offA, i16 %offB, i8* %arrA, i
; CHECK-NEXT: ret i64 [[IV_LCSSA]]
;
entry:
- %lengthA = call i32 @length(i8* %arrA)
- %lengthB = call i16 @short_length(i8* %arrB)
+ %lengthA = call i32 @length(ptr %arrA)
+ %lengthB = call i16 @short_length(ptr %arrB)
br label %loop
loop:
@@ -63,11 +63,11 @@ loop:
%wide.chk = and i1 %rcA, %rcB
call void (i1, ...) @llvm.experimental.guard(i1 %wide.chk, i32 9) [ "deopt"() ]
%indexA.ext = zext i32 %indexA to i64
- %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
- %eltA = load i8, i8* %addrA
+ %addrA = getelementptr inbounds i8, ptr %arrA, i64 %indexA.ext
+ %eltA = load i8, ptr %addrA
%indexB.ext = zext i16 %indexB to i64
- %addrB = getelementptr inbounds i8, i8* %arrB, i64 %indexB.ext
- store i8 %eltA, i8* %addrB
+ %addrB = getelementptr inbounds i8, ptr %arrB, i64 %indexB.ext
+ store i8 %eltA, ptr %addrB
%iv.next = add nuw nsw i64 %iv, 1
%latch.check = icmp ult i64 %iv.next, 16
br i1 %latch.check, label %loop, label %exit
@@ -79,11 +79,11 @@ exit:
; Consider an IV of type long and an array access into int array.
; IV is of type i64 while the range check operands are of type i32 and i64.
-define i64 @iv_rc_different_types(i32 %offA, i32 %offB, i8* %arrA, i8* %arrB, i64 %max)
+define i64 @iv_rc_different_types(i32 %offA, i32 %offB, ptr %arrA, ptr %arrB, i64 %max)
; CHECK-LABEL: @iv_rc_different_types(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
-; CHECK-NEXT: [[LENGTHB:%.*]] = call i32 @length(i8* [[ARRB:%.*]])
+; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(ptr [[ARRA:%.*]])
+; CHECK-NEXT: [[LENGTHB:%.*]] = call i32 @length(ptr [[ARRB:%.*]])
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LENGTHB]], -1
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[OFFB:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp ule i32 15, [[TMP1]]
@@ -114,13 +114,13 @@ define i64 @iv_rc_different_types(i32 %offA, i32 %offB, i8* %arrA, i8* %arrB, i6
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP15]], i32 9) [ "deopt"() ]
; CHECK-NEXT: call void @llvm.assume(i1 [[WIDE_CHK_FINAL]])
; CHECK-NEXT: [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
-; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
-; CHECK-NEXT: [[ELTA:%.*]] = load i8, i8* [[ADDRA]], align 1
+; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, ptr [[ARRA]], i64 [[INDEXA_EXT]]
+; CHECK-NEXT: [[ELTA:%.*]] = load i8, ptr [[ADDRA]], align 1
; CHECK-NEXT: [[INDEXB_EXT:%.*]] = zext i32 [[INDEXB]] to i64
-; CHECK-NEXT: [[ADDRB:%.*]] = getelementptr inbounds i8, i8* [[ARRB]], i64 [[INDEXB_EXT]]
-; CHECK-NEXT: [[ELTB:%.*]] = load i8, i8* [[ADDRB]], align 1
+; CHECK-NEXT: [[ADDRB:%.*]] = getelementptr inbounds i8, ptr [[ARRB]], i64 [[INDEXB_EXT]]
+; CHECK-NEXT: [[ELTB:%.*]] = load i8, ptr [[ADDRB]], align 1
; CHECK-NEXT: [[RESULT:%.*]] = xor i8 [[ELTA]], [[ELTB]]
-; CHECK-NEXT: store i8 [[RESULT]], i8* [[ADDRA]], align 1
+; CHECK-NEXT: store i8 [[RESULT]], ptr [[ADDRA]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[LATCH_CHECK:%.*]] = icmp ult i64 [[IV]], 15
; CHECK-NEXT: br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
@@ -130,8 +130,8 @@ define i64 @iv_rc_different_types(i32 %offA, i32 %offB, i8* %arrA, i8* %arrB, i6
;
{
entry:
- %lengthA = call i32 @length(i8* %arrA)
- %lengthB = call i32 @length(i8* %arrB)
+ %lengthA = call i32 @length(ptr %arrA)
+ %lengthB = call i32 @length(ptr %arrB)
br label %loop
loop:
@@ -146,13 +146,13 @@ loop:
%wide.chk.final = and i1 %wide.chk, %rcB
call void (i1, ...) @llvm.experimental.guard(i1 %wide.chk.final, i32 9) [ "deopt"() ]
%indexA.ext = zext i32 %indexA to i64
- %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
- %eltA = load i8, i8* %addrA
+ %addrA = getelementptr inbounds i8, ptr %arrA, i64 %indexA.ext
+ %eltA = load i8, ptr %addrA
%indexB.ext = zext i32 %indexB to i64
- %addrB = getelementptr inbounds i8, i8* %arrB, i64 %indexB.ext
- %eltB = load i8, i8* %addrB
+ %addrB = getelementptr inbounds i8, ptr %arrB, i64 %indexB.ext
+ %eltB = load i8, ptr %addrB
%result = xor i8 %eltA, %eltB
- store i8 %result, i8* %addrA
+ store i8 %result, ptr %addrA
%iv.next = add nuw nsw i64 %iv, 1
%latch.check = icmp ult i64 %iv, 15
br i1 %latch.check, label %loop, label %exit
@@ -164,10 +164,10 @@ exit:
; cannot narrow the IV to the range type, because we lose information.
; for (i64 i= 5; i>= 2; i++)
; this loop wraps around after reaching 2^64.
-define i64 @iv_rc_different_type(i32 %offA, i8* %arrA) {
+define i64 @iv_rc_different_type(i32 %offA, ptr %arrA) {
; CHECK-LABEL: @iv_rc_different_type(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(i8* [[ARRA:%.*]])
+; CHECK-NEXT: [[LENGTHA:%.*]] = call i32 @length(ptr [[ARRA:%.*]])
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 5, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -176,10 +176,10 @@ define i64 @iv_rc_different_type(i32 %offA, i8* %arrA) {
; CHECK-NEXT: [[RCA:%.*]] = icmp ult i32 [[INDEXA]], [[LENGTHA]]
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[RCA]], i32 9) [ "deopt"() ]
; CHECK-NEXT: [[INDEXA_EXT:%.*]] = zext i32 [[INDEXA]] to i64
-; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, i8* [[ARRA]], i64 [[INDEXA_EXT]]
-; CHECK-NEXT: [[ELTA:%.*]] = load i8, i8* [[ADDRA]], align 1
+; CHECK-NEXT: [[ADDRA:%.*]] = getelementptr inbounds i8, ptr [[ARRA]], i64 [[INDEXA_EXT]]
+; CHECK-NEXT: [[ELTA:%.*]] = load i8, ptr [[ADDRA]], align 1
; CHECK-NEXT: [[RES:%.*]] = add i8 [[ELTA]], 2
-; CHECK-NEXT: store i8 [[ELTA]], i8* [[ADDRA]], align 1
+; CHECK-NEXT: store i8 [[ELTA]], ptr [[ADDRA]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[LATCH_CHECK:%.*]] = icmp sge i64 [[IV_NEXT]], 2
; CHECK-NEXT: br i1 [[LATCH_CHECK]], label [[LOOP]], label [[EXIT:%.*]]
@@ -188,7 +188,7 @@ define i64 @iv_rc_different_type(i32 %offA, i8* %arrA) {
; CHECK-NEXT: ret i64 [[IV_LCSSA]]
;
entry:
- %lengthA = call i32 @length(i8* %arrA)
+ %lengthA = call i32 @length(ptr %arrA)
br label %loop
loop:
@@ -198,10 +198,10 @@ loop:
%rcA = icmp ult i32 %indexA, %lengthA
call void (i1, ...) @llvm.experimental.guard(i1 %rcA, i32 9) [ "deopt"() ]
%indexA.ext = zext i32 %indexA to i64
- %addrA = getelementptr inbounds i8, i8* %arrA, i64 %indexA.ext
- %eltA = load i8, i8* %addrA
+ %addrA = getelementptr inbounds i8, ptr %arrA, i64 %indexA.ext
+ %eltA = load i8, ptr %addrA
%res = add i8 %eltA, 2
- store i8 %eltA, i8* %addrA
+ store i8 %eltA, ptr %addrA
%iv.next = add i64 %iv, 1
%latch.check = icmp sge i64 %iv.next, 2
br i1 %latch.check, label %loop, label %exit
More information about the llvm-commits
mailing list