[llvm] 05dc149 - [LAA] Convert tests to opaque pointers (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 16 03:46:10 PST 2022
Author: Nikita Popov
Date: 2022-12-16T12:45:59+01:00
New Revision: 05dc149c875cafcd948675dff4f7a7ccb092e128
URL: https://github.com/llvm/llvm-project/commit/05dc149c875cafcd948675dff4f7a7ccb092e128
DIFF: https://github.com/llvm/llvm-project/commit/05dc149c875cafcd948675dff4f7a7ccb092e128.diff
LOG: [LAA] Convert tests to opaque pointers (NFC)
Added:
Modified:
llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
llvm/test/Analysis/LoopAccessAnalysis/forward-loop-carried.ll
llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll
llvm/test/Analysis/LoopAccessAnalysis/independent-interleaved.ll
llvm/test/Analysis/LoopAccessAnalysis/interleave-innermost.ll
llvm/test/Analysis/LoopAccessAnalysis/memcheck-for-loop-invariant.ll
llvm/test/Analysis/LoopAccessAnalysis/memcheck-off-by-one-error.ll
llvm/test/Analysis/LoopAccessAnalysis/memcheck-store-vs-alloc-size.ll
llvm/test/Analysis/LoopAccessAnalysis/memcheck-wrapping-pointers.ll
llvm/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
llvm/test/Analysis/LoopAccessAnalysis/non-wrapping-pointer.ll
llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll
llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
llvm/test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll
llvm/test/Analysis/LoopAccessAnalysis/pr31098.ll
llvm/test/Analysis/LoopAccessAnalysis/pr56672.ll
llvm/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll
llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
llvm/test/Analysis/LoopAccessAnalysis/runtime-pointer-checking-insert-typesize.ll
llvm/test/Analysis/LoopAccessAnalysis/safe-no-checks.ll
llvm/test/Analysis/LoopAccessAnalysis/safe-with-dep-distance.ll
llvm/test/Analysis/LoopAccessAnalysis/scalable-vector-regression-tests.ll
llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check1.ll
llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check2.ll
llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check3.ll
llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
llvm/test/Analysis/LoopAccessAnalysis/uncomputable-backedge-taken-count.ll
llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-1.ll
llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll
llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-convergent.ll
llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
Removed:
################################################################################
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll b/llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
index 2a969f5e91d0..dd251367631b 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
@@ -14,32 +14,31 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK: Report: unsafe dependent memory operations in loop
; CHECK-NOT: Memory dependences are safe
- at B = common global i32* null, align 8
- at A = common global i32* null, align 8
+ at B = common global ptr null, align 8
+ at A = common global ptr null, align 8
define void @f() {
entry:
- %a = load i32*, i32** @A, align 8
- %b = load i32*, i32** @B, align 8
+ %a = load ptr, ptr @A, align 8
+ %b = load ptr, ptr @B, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %storemerge3
- %loadA = load i32, i32* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %storemerge3
+ %loadA = load i32, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %storemerge3
- %loadB = load i32, i32* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %storemerge3
+ %loadB = load i32, ptr %arrayidxB, align 2
%mul = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %storemerge3, 1
- %a_float = bitcast i32* %a to float*
- %arrayidxA_plus_2 = getelementptr inbounds float, float* %a_float, i64 %add
+ %arrayidxA_plus_2 = getelementptr inbounds float, ptr %a, i64 %add
%mul_float = sitofp i32 %mul to float
- store float %mul_float, float* %arrayidxA_plus_2, align 2
+ store float %mul_float, ptr %arrayidxA_plus_2, align 2
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
index f6afc52ff286..3d89c051b3f9 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
@@ -12,21 +12,21 @@
; CHECK-NEXT: Memory dependences are safe with a maximum dependence distance of 800 bytes
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Forward:
-; CHECK-NEXT: %ld.f32 = load float, float* %gep.iv.f32, align 8 ->
-; CHECK-NEXT: store i32 %indvars.iv.i32, i32* %gep.iv, align 8
+; CHECK-NEXT: %ld.f32 = load float, ptr %gep.iv, align 8 ->
+; CHECK-NEXT: store i32 %indvars.iv.i32, ptr %gep.iv, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Forward:
-; CHECK-NEXT: %ld.f32 = load float, float* %gep.iv.f32, align 8 ->
-; CHECK-NEXT: store float %val, float* %gep.iv.min.100.f32, align 8
+; CHECK-NEXT: %ld.f32 = load float, ptr %gep.iv, align 8 ->
+; CHECK-NEXT: store float %val, ptr %gep.iv.min.100, align 8
; CHECK-EMPTY:
; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: store float %val, float* %gep.iv.min.100.f32, align 8 ->
-; CHECK-NEXT: store i32 %indvars.iv.i32, i32* %gep.iv, align 8
+; CHECK-NEXT: store float %val, ptr %gep.iv.min.100, align 8 ->
+; CHECK-NEXT: store i32 %indvars.iv.i32, ptr %gep.iv, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
-define void @backdep_type_size_equivalence(%int_pair* nocapture %vec, i64 %n) {
+define void @backdep_type_size_equivalence(ptr nocapture %vec, i64 %n) {
entry:
br label %loop
@@ -34,32 +34,30 @@ loop:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %loop ]
;; Load from vec[indvars.iv].x as float
- %gep.iv = getelementptr inbounds %int_pair, %int_pair* %vec, i64 %indvars.iv, i32 0
- %gep.iv.f32 = bitcast i32* %gep.iv to float*
- %ld.f32 = load float, float* %gep.iv.f32, align 8
+ %gep.iv = getelementptr inbounds %int_pair, ptr %vec, i64 %indvars.iv, i32 0
+ %ld.f32 = load float, ptr %gep.iv, align 8
%val = fmul fast float %ld.f32, 5.0
;; Store to vec[indvars.iv - 100].x as float
%indvars.iv.min.100 = add nsw i64 %indvars.iv, -100
- %gep.iv.min.100 = getelementptr inbounds %int_pair, %int_pair* %vec, i64 %indvars.iv.min.100, i32 0
- %gep.iv.min.100.f32 = bitcast i32* %gep.iv.min.100 to float*
- store float %val, float* %gep.iv.min.100.f32, align 8
+ %gep.iv.min.100 = getelementptr inbounds %int_pair, ptr %vec, i64 %indvars.iv.min.100, i32 0
+ store float %val, ptr %gep.iv.min.100, align 8
;; Store to vec[indvars.iv].x as i32, creating a backward dependency between
;; the two stores with different element types but the same element size.
%indvars.iv.i32 = trunc i64 %indvars.iv to i32
- store i32 %indvars.iv.i32, i32* %gep.iv, align 8
+ store i32 %indvars.iv.i32, ptr %gep.iv, align 8
;; Store to vec[indvars.iv].y as i32, strided accesses should be independent
;; between the two stores with different element types but the same element size.
- %gep.iv.1 = getelementptr inbounds %int_pair, %int_pair* %vec, i64 %indvars.iv, i32 1
- store i32 %indvars.iv.i32, i32* %gep.iv.1, align 8
+ %gep.iv.1 = getelementptr inbounds %int_pair, ptr %vec, i64 %indvars.iv, i32 1
+ store i32 %indvars.iv.i32, ptr %gep.iv.1, align 8
;; Store to vec[indvars.iv + n].y as i32, to verify no dependence in the case
;; of unknown dependence distance.
%indvars.iv.n = add nuw nsw i64 %indvars.iv, %n
- %gep.iv.n = getelementptr inbounds %int_pair, %int_pair* %vec, i64 %indvars.iv.n, i32 1
- store i32 %indvars.iv.i32, i32* %gep.iv.n, align 8
+ %gep.iv.n = getelementptr inbounds %int_pair, ptr %vec, i64 %indvars.iv.n, i32 1
+ store i32 %indvars.iv.i32, ptr %gep.iv.n, align 8
;; Loop condition.
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -81,13 +79,13 @@ exit:
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %ld.f32 = load float, float* %gep.iv.f32, align 8 ->
-; CHECK-NEXT: store i19 %indvars.iv.i19, i19* %gep.iv.i19, align 8
+; CHECK-NEXT: %ld.f32 = load float, ptr %gep.iv, align 8 ->
+; CHECK-NEXT: store i19 %indvars.iv.i19, ptr %gep.iv, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
-define void @backdep_type_store_size_equivalence(%int_pair* nocapture %vec, i64 %n) {
+define void @backdep_type_store_size_equivalence(ptr nocapture %vec, i64 %n) {
entry:
br label %loop
@@ -95,15 +93,13 @@ loop:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %loop ]
;; Load from vec[indvars.iv].x as float
- %gep.iv = getelementptr inbounds %int_pair, %int_pair* %vec, i64 %indvars.iv, i32 0
- %gep.iv.f32 = bitcast i32* %gep.iv to float*
- %ld.f32 = load float, float* %gep.iv.f32, align 8
+ %gep.iv = getelementptr inbounds %int_pair, ptr %vec, i64 %indvars.iv, i32 0
+ %ld.f32 = load float, ptr %gep.iv, align 8
%val = fmul fast float %ld.f32, 5.0
;; Store to vec[indvars.iv].x as i19.
%indvars.iv.i19 = trunc i64 %indvars.iv to i19
- %gep.iv.i19 = bitcast i32* %gep.iv to i19*
- store i19 %indvars.iv.i19, i19* %gep.iv.i19, align 8
+ store i19 %indvars.iv.i19, ptr %gep.iv, align 8
;; Loop condition.
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -124,29 +120,29 @@ exit:
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %ld.i64 = load i64, i64* %gep.iv, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, i32* %gep.iv.n.i32, align 8
-; CHECK-EMPTY:
-; CHECK-NEXT: ForwardButPreventsForwarding:
-; CHECK-NEXT: store double %val, double* %gep.iv.101.f64, align 8 ->
-; CHECK-NEXT: %ld.i64 = load i64, i64* %gep.iv, align 8
+; CHECK-NEXT: %ld.f64 = load double, ptr %gep.iv, align 8 ->
+; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %ld.f64 = load double, double* %gep.iv.f64, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, i32* %gep.iv.n.i32, align 8
+; CHECK-NEXT: %ld.i64 = load i64, ptr %gep.iv, align 8 ->
+; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
; CHECK-EMPTY:
; CHECK-NEXT: BackwardVectorizableButPreventsForwarding:
-; CHECK-NEXT: %ld.f64 = load double, double* %gep.iv.f64, align 8 ->
-; CHECK-NEXT: store double %val, double* %gep.iv.101.f64, align 8
+; CHECK-NEXT: %ld.f64 = load double, ptr %gep.iv, align 8 ->
+; CHECK-NEXT: store double %val, ptr %gep.iv.101.i64, align 8
+; CHECK-EMPTY:
+; CHECK-NEXT: ForwardButPreventsForwarding:
+; CHECK-NEXT: store double %val, ptr %gep.iv.101.i64, align 8 ->
+; CHECK-NEXT: %ld.i64 = load i64, ptr %gep.iv, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: store double %val, double* %gep.iv.101.f64, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, i32* %gep.iv.n.i32, align 8
+; CHECK-NEXT: store double %val, ptr %gep.iv.101.i64, align 8 ->
+; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
-define void @neg_dist_dep_type_size_equivalence(i64* nocapture %vec, i64 %n) {
+define void @neg_dist_dep_type_size_equivalence(ptr nocapture %vec, i64 %n) {
entry:
br label %loop
@@ -154,28 +150,25 @@ loop:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %loop ]
;; Load from vec[indvars.iv] as double
- %gep.iv = getelementptr i64, i64* %vec, i64 %indvars.iv
- %gep.iv.f64 = bitcast i64* %gep.iv to double*
- %ld.f64 = load double, double* %gep.iv.f64, align 8
+ %gep.iv = getelementptr i64, ptr %vec, i64 %indvars.iv
+ %ld.f64 = load double, ptr %gep.iv, align 8
%val = fmul fast double %ld.f64, 5.0
;; Store to vec[indvars.iv + 101] as double
%indvars.iv.101 = add nsw i64 %indvars.iv, 101
- %gep.iv.101.i64 = getelementptr i64, i64* %vec, i64 %indvars.iv.101
- %gep.iv.101.f64 = bitcast i64* %gep.iv.101.i64 to double*
- store double %val, double* %gep.iv.101.f64, align 8
+ %gep.iv.101.i64 = getelementptr i64, ptr %vec, i64 %indvars.iv.101
+ store double %val, ptr %gep.iv.101.i64, align 8
;; Read from vec[indvars.iv] as i64 creating
;; a forward but prevents forwarding dependence
;; with different types but same sizes.
- %ld.i64 = load i64, i64* %gep.iv, align 8
+ %ld.i64 = load i64, ptr %gep.iv, align 8
;; Different sizes
%indvars.iv.n = add nuw nsw i64 %indvars.iv, %n
- %gep.iv.n.i64 = getelementptr inbounds i64, i64* %vec, i64 %indvars.iv.n
- %gep.iv.n.i32 = bitcast i64* %gep.iv.n.i64 to i32*
+ %gep.iv.n.i64 = getelementptr inbounds i64, ptr %vec, i64 %indvars.iv.n
%ld.i64.i32 = trunc i64 %ld.i64 to i32
- store i32 %ld.i64.i32, i32* %gep.iv.n.i32, align 8
+ store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
;; Loop condition.
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-carried.ll b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-carried.ll
index 18b25fd73710..650887f2f6f5 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-carried.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-carried.ll
@@ -7,12 +7,12 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* %A, i32* %B, i32* %C, i64 %N) {
+define void @f(ptr %A, ptr %B, ptr %C, i64 %N) {
; CHECK: Dependences:
; CHECK-NEXT: Forward:
-; CHECK-NEXT: store i32 %a_p1, i32* %Aidx_ahead, align 4 ->
-; CHECK-NEXT: %a = load i32, i32* %Aidx, align 4
+; CHECK-NEXT: store i32 %a_p1, ptr %Aidx_ahead, align 4 ->
+; CHECK-NEXT: %a = load i32, ptr %Aidx, align 4
entry:
br label %for.body
@@ -23,18 +23,18 @@ for.body: ; preds = %for.body, %entry
%idx = add nuw nsw i64 %indvars.iv, 8
- %Aidx_ahead = getelementptr inbounds i32, i32* %A, i64 %idx
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %Aidx_ahead = getelementptr inbounds i32, ptr %A, i64 %idx
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%a_p1 = add i32 %b, 2
- store i32 %a_p1, i32* %Aidx_ahead, align 4
+ store i32 %a_p1, ptr %Aidx_ahead, align 4
- %a = load i32, i32* %Aidx, align 4
+ %a = load i32, ptr %Aidx, align 4
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll
index 03e04cd35733..42d87edd8b4b 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll
@@ -20,18 +20,18 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-define void @f(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
+define void @f(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
; CHECK: Dependences:
; CHECK-NEXT: Forward:
-; CHECK-NEXT: store i32 %b_p1, i32* %Aidx, align 4 ->
-; CHECK-NEXT: %a = load i32, i32* %Aidx, align 4
+; CHECK-NEXT: store i32 %b_p1, ptr %Aidx, align 4 ->
+; CHECK-NEXT: %a = load i32, ptr %Aidx, align 4
; CHECK: ForwardButPreventsForwarding:
-; CHECK-NEXT: store i32 %b_p2, i32* %Aidx_next, align 4 ->
-; CHECK-NEXT: %a = load i32, i32* %Aidx, align 4
+; CHECK-NEXT: store i32 %b_p2, ptr %Aidx_next, align 4 ->
+; CHECK-NEXT: %a = load i32, ptr %Aidx, align 4
; CHECK: Forward:
-; CHECK-NEXT: store i32 %b_p2, i32* %Aidx_next, align 4 ->
-; CHECK-NEXT: store i32 %b_p1, i32* %Aidx, align 4
+; CHECK-NEXT: store i32 %b_p2, ptr %Aidx_next, align 4 ->
+; CHECK-NEXT: store i32 %b_p1, ptr %Aidx, align 4
entry:
br label %for.body
@@ -40,21 +40,21 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+ %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+ %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
- %b = load i32, i32* %Bidx, align 4
+ %b = load i32, ptr %Bidx, align 4
%b_p2 = add i32 %b, 1
- store i32 %b_p2, i32* %Aidx_next, align 4
+ store i32 %b_p2, ptr %Aidx_next, align 4
%b_p1 = add i32 %b, 2
- store i32 %b_p1, i32* %Aidx, align 4
+ store i32 %b_p1, ptr %Aidx, align 4
- %a = load i32, i32* %Aidx, align 4
+ %a = load i32, ptr %Aidx, align 4
%c = mul i32 %a, 2
- store i32 %c, i32* %Cidx, align 4
+ store i32 %c, ptr %Cidx, align 4
%exitcond = icmp eq i64 %indvars.iv.next, %N
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/independent-interleaved.ll b/llvm/test/Analysis/LoopAccessAnalysis/independent-interleaved.ll
index 5bd8981dbde7..b00056998d53 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/independent-interleaved.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/independent-interleaved.ll
@@ -19,21 +19,21 @@
; CHECK: for.body:
; CHECK-NOT: Forward:
-; CHECK-NOT: store i32 %z, i32* %p_i.y, align 8 ->
-; CHECK-NOT: %0 = load i32, i32* %p_i.x, align 8
+; CHECK-NOT: store i32 %z, ptr %p_i.y, align 8 ->
+; CHECK-NOT: %0 = load i32, ptr %p_i.x, align 8
%pair = type { i32, i32 }
-define i32 @independent_interleaved(%pair *%p, i64 %n, i32 %z) {
+define i32 @independent_interleaved(ptr %p, i64 %n, i32 %z) {
entry:
br label %for.body
for.body:
%i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
%s = phi i32 [ %1, %for.body ], [ 0, %entry ]
- %p_i.x = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
- %p_i.y = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 1
- store i32 %z, i32* %p_i.y, align 8
- %0 = load i32, i32* %p_i.x, align 8
+ %p_i.x = getelementptr inbounds %pair, ptr %p, i64 %i, i32 0
+ %p_i.y = getelementptr inbounds %pair, ptr %p, i64 %i, i32 1
+ store i32 %z, ptr %p_i.y, align 8
+ %0 = load i32, ptr %p_i.x, align 8
%1 = add nsw i32 %0, %s
%i.next = add nuw nsw i64 %i, 1
%cond = icmp slt i64 %i.next, %n
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/interleave-innermost.ll b/llvm/test/Analysis/LoopAccessAnalysis/interleave-innermost.ll
index 5029fc2acdcf..74d02ebc8e3e 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/interleave-innermost.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/interleave-innermost.ll
@@ -16,13 +16,13 @@ bb:
br i1 %X, label %.loopexit5.outer, label %.lr.ph12
.lr.ph12:
- %f.110 = phi i32* [ %tmp1, %.loopexit ], [ null, %.loopexit5.outer ]
- %tmp1 = getelementptr inbounds i32, i32* %f.110, i64 -2
+ %f.110 = phi ptr [ %tmp1, %.loopexit ], [ null, %.loopexit5.outer ]
+ %tmp1 = getelementptr inbounds i32, ptr %f.110, i64 -2
br i1 %Y, label %bb4, label %.loopexit
bb4:
%j.27 = phi i32 [ 0, %.lr.ph12 ], [ %tmp7, %bb4 ]
- %tmp5 = load i32, i32* %f.110, align 4
+ %tmp5 = load i32, ptr %f.110, align 4
%tmp7 = add nsw i32 %j.27, 1
%exitcond = icmp eq i32 %tmp7, 0
br i1 %exitcond, label %.loopexit, label %bb4
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/memcheck-for-loop-invariant.ll b/llvm/test/Analysis/LoopAccessAnalysis/memcheck-for-loop-invariant.ll
index af2b928f0f06..322723bb9a00 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/memcheck-for-loop-invariant.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/memcheck-for-loop-invariant.ll
@@ -13,21 +13,21 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group ({{.*}}):
-; CHECK-NEXT: %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
; CHECK-NEXT: Against group ({{.*}}):
-; CHECK-NEXT: i32* %b
+; CHECK-NEXT: ptr %b
-define void @f(i32* %a, i32* %b) {
+define void @f(ptr %a, ptr %b) {
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
- %loadB = load i32, i32* %b, align 4
- store i32 %loadB, i32* %arrayidxA, align 4
+ %loadB = load i32, ptr %b, align 4
+ store i32 %loadB, ptr %arrayidxA, align 4
%inc = add nuw nsw i64 %ind, 1
%exitcond = icmp eq i64 %inc, 20
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/memcheck-off-by-one-error.ll b/llvm/test/Analysis/LoopAccessAnalysis/memcheck-off-by-one-error.ll
index 32d57f5b480c..4a9f004cb44a 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/memcheck-off-by-one-error.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/memcheck-off-by-one-error.ll
@@ -22,7 +22,7 @@
;CHECK: (Low: %op High: (32 + %op))
;CHECK: (Low: %src High: (32 + %src))
-define void @fastCopy(i8* nocapture readonly %src, i8* nocapture %op) {
+define void @fastCopy(ptr nocapture readonly %src, ptr nocapture %op) {
entry:
br label %while.body.preheader
@@ -31,14 +31,12 @@ while.body.preheader: ; preds = %entry
while.body: ; preds = %while.body.preheader, %while.body
%len.addr.07 = phi i32 [ %sub, %while.body ], [ 32, %while.body.preheader ]
- %op.addr.06 = phi i8* [ %add.ptr1, %while.body ], [ %op, %while.body.preheader ]
- %src.addr.05 = phi i8* [ %add.ptr, %while.body ], [ %src, %while.body.preheader ]
- %0 = bitcast i8* %src.addr.05 to i64*
- %1 = load i64, i64* %0, align 8
- %2 = bitcast i8* %op.addr.06 to i64*
- store i64 %1, i64* %2, align 8
- %add.ptr = getelementptr inbounds i8, i8* %src.addr.05, i64 8
- %add.ptr1 = getelementptr inbounds i8, i8* %op.addr.06, i64 8
+ %op.addr.06 = phi ptr [ %add.ptr1, %while.body ], [ %op, %while.body.preheader ]
+ %src.addr.05 = phi ptr [ %add.ptr, %while.body ], [ %src, %while.body.preheader ]
+ %0 = load i64, ptr %src.addr.05, align 8
+ store i64 %0, ptr %op.addr.06, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %src.addr.05, i64 8
+ %add.ptr1 = getelementptr inbounds i8, ptr %op.addr.06, i64 8
%sub = add nsw i32 %len.addr.07, -8
%cmp = icmp sgt i32 %len.addr.07, 8
br i1 %cmp, label %while.body, label %while.end.loopexit
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/memcheck-store-vs-alloc-size.ll b/llvm/test/Analysis/LoopAccessAnalysis/memcheck-store-vs-alloc-size.ll
index 577887d5c95a..6bb1d21b9080 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/memcheck-store-vs-alloc-size.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/memcheck-store-vs-alloc-size.ll
@@ -9,7 +9,7 @@
;CHECK: (Low: %op High: (27 + %op))
;CHECK: (Low: %src High: (27 + %src))
-define void @fastCopy(i8* nocapture readonly %src, i8* nocapture %op) {
+define void @fastCopy(ptr nocapture readonly %src, ptr nocapture %op) {
entry:
br label %while.body.preheader
@@ -18,14 +18,12 @@ while.body.preheader: ; preds = %entry
while.body: ; preds = %while.body.preheader, %while.body
%len.addr.07 = phi i32 [ %sub, %while.body ], [ 32, %while.body.preheader ]
- %op.addr.06 = phi i8* [ %add.ptr1, %while.body ], [ %op, %while.body.preheader ]
- %src.addr.05 = phi i8* [ %add.ptr, %while.body ], [ %src, %while.body.preheader ]
- %0 = bitcast i8* %src.addr.05 to i19*
- %1 = load i19, i19* %0, align 8
- %2 = bitcast i8* %op.addr.06 to i19*
- store i19 %1, i19* %2, align 8
- %add.ptr = getelementptr inbounds i8, i8* %src.addr.05, i19 8
- %add.ptr1 = getelementptr inbounds i8, i8* %op.addr.06, i19 8
+ %op.addr.06 = phi ptr [ %add.ptr1, %while.body ], [ %op, %while.body.preheader ]
+ %src.addr.05 = phi ptr [ %add.ptr, %while.body ], [ %src, %while.body.preheader ]
+ %0 = load i19, ptr %src.addr.05, align 8
+ store i19 %0, ptr %op.addr.06, align 8
+ %add.ptr = getelementptr inbounds i8, ptr %src.addr.05, i19 8
+ %add.ptr1 = getelementptr inbounds i8, ptr %op.addr.06, i19 8
%sub = add nsw i32 %len.addr.07, -8
%cmp = icmp sgt i32 %len.addr.07, 8
br i1 %cmp, label %while.body, label %while.end.loopexit
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/memcheck-wrapping-pointers.ll b/llvm/test/Analysis/LoopAccessAnalysis/memcheck-wrapping-pointers.ll
index 3bbc6b47686d..6dbb4a0c0129 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/memcheck-wrapping-pointers.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/memcheck-wrapping-pointers.ll
@@ -29,9 +29,9 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group
-; CHECK-NEXT: %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
+; CHECK-NEXT: %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom
; CHECK-NEXT: Against group
-; CHECK-NEXT: %arrayidx4 = getelementptr inbounds i32, i32* %b, i64 %conv11
+; CHECK-NEXT: %arrayidx4 = getelementptr inbounds i32, ptr %b, i64 %conv11
; CHECK-NEXT: Grouped accesses:
; CHECK-NEXT: Group
; CHECK-NEXT: (Low: (4 + %a) High: (4 + (4 * (1 umax %x)) + %a))
@@ -44,13 +44,13 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nusw>
; CHECK: Expressions re-written:
-; CHECK-NEXT: [PSE] %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom:
+; CHECK-NEXT: [PSE] %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom:
; CHECK-NEXT: ((4 * (zext i32 {1,+,1}<%for.body> to i64))<nuw><nsw> + %a)<nuw>
; CHECK-NEXT: --> {(4 + %a),+,4}<%for.body>
-; CHECK-NEXT: [PSE] %arrayidx4 = getelementptr inbounds i32, i32* %b, i64 %conv11:
+; CHECK-NEXT: [PSE] %arrayidx4 = getelementptr inbounds i32, ptr %b, i64 %conv11:
; CHECK-NEXT: ((4 * (zext i32 {0,+,1}<%for.body> to i64))<nuw><nsw> + %b)<nuw>
; CHECK-NEXT: --> {%b,+,4}<%for.body>
-define void @test1(i64 %x, i32* %a, i32* %b) {
+define void @test1(i64 %x, ptr %a, ptr %b) {
entry:
br label %for.body
@@ -59,11 +59,11 @@ for.body: ; preds = %for.body.preheader,
%i.010 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%add = add i32 %i.010, 1
%idxprom = zext i32 %add to i64
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
- %ld = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom
+ %ld = load i32, ptr %arrayidx, align 4
%add2 = add nsw i32 %ld, 1
- %arrayidx4 = getelementptr inbounds i32, i32* %b, i64 %conv11
- store i32 %add2, i32* %arrayidx4, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %b, i64 %conv11
+ store i32 %add2, ptr %arrayidx4, align 4
%conv = zext i32 %add to i64
%cmp = icmp ult i64 %conv, %x
br i1 %cmp, label %for.body, label %exit
@@ -86,17 +86,17 @@ exit:
; CHECK: SCEV assumptions:
; CHECK-NEXT: {1,+,1}<%for.body> Added Flags: <nusw>
; CHECK-NEXT: {0,+,1}<%for.body> Added Flags: <nusw>
- define void @test2(i64 %x, i32* %a) {
+ define void @test2(i64 %x, ptr %a) {
entry:
br label %for.body
for.body:
%conv11 = phi i64 [ %conv, %for.body ], [ 0, %entry ]
%i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32, i32* %a, i64 %conv11
- %ld = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %conv11
+ %ld = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %ld, 1
- store i32 %add, i32* %arrayidx, align 4
+ store i32 %add, ptr %arrayidx, align 4
%inc = add i32 %i.010, 1
%conv = zext i32 %inc to i64
%cmp = icmp ult i64 %conv, %x
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
index 45f9e2e2c458..05882a7b0a7d 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
@@ -34,13 +34,13 @@ target triple = "x86_64-unknown-linux-gnu"
%struct.s = type { [32 x i32], [32 x i32], [32 x [32 x i32]] }
-define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
+define void @Test(ptr nocapture %obj, i64 %z) #0 {
br label %.outer.preheader
.outer.preheader:
%i = phi i64 [ 0, %0 ], [ %i.next, %.outer ]
- %1 = getelementptr inbounds %struct.s, %struct.s* %obj, i64 0, i32 1, i64 %i
+ %1 = getelementptr inbounds %struct.s, ptr %obj, i64 0, i32 1, i64 %i
br label %.inner
.exit:
@@ -53,14 +53,14 @@ define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
.inner:
%j = phi i64 [ 0, %.outer.preheader ], [ %j.next, %.inner ]
- %2 = getelementptr inbounds %struct.s, %struct.s* %obj, i64 0, i32 0, i64 %j
- %3 = load i32, i32* %2
- %4 = load i32, i32* %1
+ %2 = getelementptr inbounds %struct.s, ptr %obj, i64 0, i32 0, i64 %j
+ %3 = load i32, ptr %2
+ %4 = load i32, ptr %1
%5 = add nsw i32 %4, %3
- %6 = getelementptr inbounds %struct.s, %struct.s* %obj, i64 0, i32 2, i64 %i, i64 %j
- %7 = load i32, i32* %6
+ %6 = getelementptr inbounds %struct.s, ptr %obj, i64 0, i32 2, i64 %i, i64 %j
+ %7 = load i32, ptr %6
%8 = add nsw i32 %5, %7
- store i32 %8, i32* %6
+ store i32 %8, ptr %6
%j.next = add nuw nsw i64 %j, 1
%exitcond.inner = icmp eq i64 %j.next, %z
br i1 %exitcond.inner, label %.outer, label %.inner
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/non-wrapping-pointer.ll b/llvm/test/Analysis/LoopAccessAnalysis/non-wrapping-pointer.ll
index 29dc3e5d8cae..882609aaa034 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/non-wrapping-pointer.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/non-wrapping-pointer.ll
@@ -12,8 +12,8 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: Memory dependences are safe{{$}}
-define void @f(i16* noalias %a,
- i16* noalias %b, i64 %N) {
+define void @f(ptr noalias %a,
+ ptr noalias %b, i64 %N) {
entry:
br label %for.body
@@ -22,15 +22,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul nuw nsw i64 %ind, 2
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %mul
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %mul
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%exitcond = icmp eq i64 %inc, %N
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll b/llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll
index f1c80057702b..b1c4908c4d45 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll
@@ -13,24 +13,24 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
; Function Attrs: ssp uwtable
-define void @foo(i1 %cond, i32* %ptr1, i32* %ptr2) {
+define void @foo(i1 %cond, ptr %ptr1, ptr %ptr2) {
br i1 %cond, label %.preheader, label %diamond
diamond: ; preds = %.noexc.i.i
br label %.preheader
.preheader: ; preds = %diamond, %0
- %ptr1_or_null = phi i32* [ null, %0 ], [ %ptr1, %diamond ]
- %ptr2_or_null = phi i32* [ null, %0 ], [ %ptr2, %diamond ]
+ %ptr1_or_null = phi ptr [ null, %0 ], [ %ptr1, %diamond ]
+ %ptr2_or_null = phi ptr [ null, %0 ], [ %ptr2, %diamond ]
br label %.lr.ph
.lr.ph: ; preds = %.lr.ph, %.preheader
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 10, %.preheader ]
%indvars.iv.next = add nsw i64 %indvars.iv, -1
- %tmp4 = getelementptr inbounds i32, i32* %ptr2_or_null, i64 %indvars.iv.next
- %tmp5 = load i32, i32* %tmp4, align 4
- %tmp6 = getelementptr inbounds i32, i32* %ptr1_or_null, i64 %indvars.iv.next
- store i32 undef, i32* %tmp6, align 4
+ %tmp4 = getelementptr inbounds i32, ptr %ptr2_or_null, i64 %indvars.iv.next
+ %tmp5 = load i32, ptr %tmp4, align 4
+ %tmp6 = getelementptr inbounds i32, ptr %ptr1_or_null, i64 %indvars.iv.next
+ store i32 undef, ptr %tmp6, align 4
br i1 false, label %.lr.ph, label %.end
.end:
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll b/llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
index be628f129ec9..8ddcc152d11c 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
@@ -16,12 +16,12 @@ target triple = "aarch64--linux-gnueabi"
; CHECK: Check 11:
; CHECK-NOT: Check 12:
-define void @testf(i16* %a,
- i16* %b,
- i16* %c,
- i16* %d,
- i16* %e,
- i16* %f) {
+define void @testf(ptr %a,
+ ptr %b,
+ ptr %c,
+ ptr %d,
+ ptr %e,
+ ptr %f) {
entry:
br label %for.body
@@ -30,26 +30,26 @@ for.body: ; preds = %for.body, %entry
%add = add nuw nsw i64 %ind, 1
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %ind
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %ind
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
- %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %ind
- %loadC = load i16, i16* %arrayidxC, align 2
+ %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %ind
+ %loadC = load i16, ptr %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA
%mul1 = mul i16 %mul, %loadC
- %arrayidxD = getelementptr inbounds i16, i16* %d, i64 %ind
- store i16 %mul1, i16* %arrayidxD, align 2
+ %arrayidxD = getelementptr inbounds i16, ptr %d, i64 %ind
+ store i16 %mul1, ptr %arrayidxD, align 2
- %arrayidxE = getelementptr inbounds i16, i16* %e, i64 %ind
- store i16 %mul, i16* %arrayidxE, align 2
+ %arrayidxE = getelementptr inbounds i16, ptr %e, i64 %ind
+ store i16 %mul, ptr %arrayidxE, align 2
- %arrayidxF = getelementptr inbounds i16, i16* %f, i64 %ind
- store i16 %mul1, i16* %arrayidxF, align 2
+ %arrayidxF = getelementptr inbounds i16, ptr %f, i64 %ind
+ store i16 %mul1, ptr %arrayidxF, align 2
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
@@ -66,7 +66,7 @@ for.end: ; preds = %for.body
; unsigned long ind = 0;
; for (unsigned long ind = 0; ind < 20; ++ind) {
; c[2 * ind] = a[ind] * a[ind + 1];
-; c[2 * ind + 1] = a[ind] * a[ind + 1] * b[ind];
+; c[2 * ind + 1] = a[ind] * a[ind + 1] * b[ind];
; }
; }
;
@@ -82,17 +82,17 @@ for.end: ; preds = %for.body
; CHECK: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group ([[ZERO:.+]]):
-; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, ptr %c, i64 %store_ind_inc
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %store_ind
; CHECK-NEXT: Against group ([[ONE:.+]]):
-; CHECK-NEXT: %arrayidxA1 = getelementptr inbounds i16, i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %ind
+; CHECK-NEXT: %arrayidxA1 = getelementptr inbounds i16, ptr %a, i64 %add
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %ind
; CHECK-NEXT: Check 1:
; CHECK-NEXT: Comparing group ({{.*}}[[ZERO]]):
-; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, ptr %c, i64 %store_ind_inc
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %store_ind
; CHECK-NEXT: Against group ([[TWO:.+]]):
-; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
+; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %ind
; CHECK-NEXT: Grouped accesses:
; CHECK-NEXT: Group {{.*}}[[ZERO]]:
; CHECK-NEXT: (Low: %c High: (80 + %c))
@@ -106,9 +106,9 @@ for.end: ; preds = %for.body
; CHECK-NEXT: (Low: %b High: (40 + %b))
; CHECK-NEXT: Member: {%b,+,2}
-define void @testg(i16* %a,
- i16* %b,
- i16* %c) {
+define void @testg(ptr %a,
+ ptr %b,
+ ptr %c) {
entry:
br label %for.body
@@ -120,23 +120,23 @@ for.body: ; preds = %for.body, %entry
%store_ind_inc = add nuw nsw i64 %store_ind, 1
%store_ind_next = add nuw nsw i64 %store_ind_inc, 1
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %ind
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %ind
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxA1 = getelementptr inbounds i16, i16* %a, i64 %add
- %loadA1 = load i16, i16* %arrayidxA1, align 2
+ %arrayidxA1 = getelementptr inbounds i16, ptr %a, i64 %add
+ %loadA1 = load i16, ptr %arrayidxA1, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%mul = mul i16 %loadA, %loadA1
%mul1 = mul i16 %mul, %loadB
- %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
- store i16 %mul1, i16* %arrayidxC, align 2
+ %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %store_ind
+ store i16 %mul1, ptr %arrayidxC, align 2
- %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
- store i16 %mul, i16* %arrayidxC1, align 2
+ %arrayidxC1 = getelementptr inbounds i16, ptr %c, i64 %store_ind_inc
+ store i16 %mul, ptr %arrayidxC1, align 2
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
@@ -154,17 +154,17 @@ for.end: ; preds = %for.body
; CHECK: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group ([[ZERO:.+]]):
-; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, ptr %c, i64 %store_ind_inc
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %store_ind
; CHECK-NEXT: Against group ([[ONE:.+]]):
-; CHECK-NEXT: %arrayidxA1 = getelementptr i16, i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxA = getelementptr i16, i16* %a, i64 %ind
+; CHECK-NEXT: %arrayidxA1 = getelementptr i16, ptr %a, i64 %add
+; CHECK-NEXT: %arrayidxA = getelementptr i16, ptr %a, i64 %ind
; CHECK-NEXT: Check 1:
; CHECK-NEXT: Comparing group ({{.*}}[[ZERO]]):
-; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, ptr %c, i64 %store_ind_inc
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %store_ind
; CHECK-NEXT: Against group ([[TWO:.+]]):
-; CHECK-NEXT: %arrayidxB = getelementptr i16, i16* %b, i64 %ind
+; CHECK-NEXT: %arrayidxB = getelementptr i16, ptr %b, i64 %ind
; CHECK-NEXT: Grouped accesses:
; CHECK-NEXT: Group {{.*}}[[ZERO]]:
; CHECK-NEXT: (Low: %c High: (80 + %c))
@@ -178,9 +178,9 @@ for.end: ; preds = %for.body
; CHECK-NEXT: (Low: %b High: (40 + %b))
; CHECK-NEXT: Member: {%b,+,2}
-define void @testh(i16* %a,
- i16* %b,
- i16* %c) {
+define void @testh(ptr %a,
+ ptr %b,
+ ptr %c) {
entry:
br label %for.body
@@ -192,23 +192,23 @@ for.body: ; preds = %for.body, %entry
%store_ind_inc = add nuw nsw i64 %store_ind, 1
%store_ind_next = add nuw nsw i64 %store_ind_inc, 1
- %arrayidxA = getelementptr i16, i16* %a, i64 %ind
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %ind
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxA1 = getelementptr i16, i16* %a, i64 %add
- %loadA1 = load i16, i16* %arrayidxA1, align 2
+ %arrayidxA1 = getelementptr i16, ptr %a, i64 %add
+ %loadA1 = load i16, ptr %arrayidxA1, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%mul = mul i16 %loadA, %loadA1
%mul1 = mul i16 %mul, %loadB
- %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
- store i16 %mul1, i16* %arrayidxC, align 2
+ %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %store_ind
+ store i16 %mul1, ptr %arrayidxC, align 2
- %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
- store i16 %mul, i16* %arrayidxC1, align 2
+ %arrayidxC1 = getelementptr inbounds i16, ptr %c, i64 %store_ind_inc
+ store i16 %mul, ptr %arrayidxC1, align 2
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
@@ -236,14 +236,14 @@ for.end: ; preds = %for.body
; CHECK: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group ([[ZERO:.+]]):
-; CHECK-NEXT: %storeidx = getelementptr inbounds i16, i16* %a, i64 %store_ind
+; CHECK-NEXT: %storeidx = getelementptr inbounds i16, ptr %a, i64 %store_ind
; CHECK-NEXT: Against group ([[ONE:.+]]):
-; CHECK-NEXT: %arrayidxA1 = getelementptr i16, i16* %a, i64 %ind
+; CHECK-NEXT: %arrayidxA1 = getelementptr i16, ptr %a, i64 %ind
; CHECK-NEXT: Check 1:
; CHECK-NEXT: Comparing group ({{.*}}[[ZERO]]):
-; CHECK-NEXT: %storeidx = getelementptr inbounds i16, i16* %a, i64 %store_ind
+; CHECK-NEXT: %storeidx = getelementptr inbounds i16, ptr %a, i64 %store_ind
; CHECK-NEXT: Against group ([[TWO:.+]]):
-; CHECK-NEXT: %arrayidxA2 = getelementptr i16, i16* %a, i64 %ind2
+; CHECK-NEXT: %arrayidxA2 = getelementptr i16, ptr %a, i64 %ind2
; CHECK-NEXT: Grouped accesses:
; CHECK-NEXT: Group {{.*}}[[ZERO]]:
; CHECK-NEXT: (Low: ((2 * %offset) + %a) High: (10000 + (2 * %offset) + %a))
@@ -255,7 +255,7 @@ for.end: ; preds = %for.body
; CHECK-NEXT: (Low: (20000 + %a) High: (30000 + %a))
; CHECK-NEXT: Member: {(20000 + %a),+,2}<nw><%for.body>
-define void @testi(i16* %a,
+define void @testi(ptr %a,
i64 %offset) {
entry:
br label %for.body
@@ -267,17 +267,17 @@ for.body: ; preds = %for.body, %entry
%add = add nuw nsw i64 %ind, 1
%store_ind_inc = add nuw nsw i64 %store_ind, 1
- %arrayidxA1 = getelementptr i16, i16* %a, i64 %ind
+ %arrayidxA1 = getelementptr i16, ptr %a, i64 %ind
%ind2 = add nuw nsw i64 %ind, 10000
- %arrayidxA2 = getelementptr i16, i16* %a, i64 %ind2
+ %arrayidxA2 = getelementptr i16, ptr %a, i64 %ind2
- %loadA1 = load i16, i16* %arrayidxA1, align 2
- %loadA2 = load i16, i16* %arrayidxA2, align 2
+ %loadA1 = load i16, ptr %arrayidxA1, align 2
+ %loadA2 = load i16, ptr %arrayidxA2, align 2
%addres = add i16 %loadA1, %loadA2
- %storeidx = getelementptr inbounds i16, i16* %a, i64 %store_ind
- store i16 %addres, i16* %storeidx, align 2
+ %storeidx = getelementptr inbounds i16, ptr %a, i64 %store_ind
+ store i16 %addres, ptr %storeidx, align 2
%exitcond = icmp eq i64 %add, 5000
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll b/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
index db3fb78c108d..07ebaf2dfe05 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
@@ -2,7 +2,7 @@
%s1 = type { [32000 x double], [32000 x double], [32000 x double] }
-define i32 @load_with_pointer_phi_no_runtime_checks(%s1* %data) {
+define i32 @load_with_pointer_phi_no_runtime_checks(ptr %data) {
; CHECK-LABEL: load_with_pointer_phi_no_runtime_checks
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Memory dependences are safe
@@ -14,22 +14,22 @@ loop.header: ; preds = %loop.latch, %entr
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
%iv.next = add nuw nsw i64 %iv, 1
%cmp5 = icmp ult i64 %iv, 15999
- %arrayidx = getelementptr inbounds %s1, %s1 * %data, i64 0, i32 0, i64 %iv
+ %arrayidx = getelementptr inbounds %s1, ptr %data, i64 0, i32 0, i64 %iv
br i1 %cmp5, label %if.then, label %if.else
if.then: ; preds = %loop.header
- %gep.1 = getelementptr inbounds %s1, %s1* %data, i64 0, i32 1, i64 %iv
+ %gep.1 = getelementptr inbounds %s1, ptr %data, i64 0, i32 1, i64 %iv
br label %loop.latch
if.else: ; preds = %loop.header
- %gep.2 = getelementptr inbounds %s1, %s1* %data, i64 0, i32 2, i64 %iv
+ %gep.2 = getelementptr inbounds %s1, ptr %data, i64 0, i32 2, i64 %iv
br label %loop.latch
loop.latch: ; preds = %if.else, %if.then
- %gep.2.sink = phi double* [ %gep.2, %if.else ], [ %gep.1, %if.then ]
- %v8 = load double, double* %gep.2.sink, align 8
+ %gep.2.sink = phi ptr [ %gep.2, %if.else ], [ %gep.1, %if.then ]
+ %v8 = load double, ptr %gep.2.sink, align 8
%mul16 = fmul double 3.0, %v8
- store double %mul16, double* %arrayidx, align 8
+ store double %mul16, ptr %arrayidx, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -37,7 +37,7 @@ exit: ; preds = %loop.latch
ret i32 10
}
-define i32 @store_with_pointer_phi_no_runtime_checks(%s1* %data) {
+define i32 @store_with_pointer_phi_no_runtime_checks(ptr %data) {
; CHECK-LABEL: 'store_with_pointer_phi_no_runtime_checks'
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Memory dependences are safe
@@ -49,22 +49,22 @@ loop.header: ; preds = %loop.latch, %entr
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
%iv.next = add nuw nsw i64 %iv, 1
%cmp5 = icmp ult i64 %iv, 15999
- %arrayidx = getelementptr inbounds %s1, %s1 * %data, i64 0, i32 0, i64 %iv
+ %arrayidx = getelementptr inbounds %s1, ptr %data, i64 0, i32 0, i64 %iv
br i1 %cmp5, label %if.then, label %if.else
if.then: ; preds = %loop.header
- %gep.1 = getelementptr inbounds %s1, %s1* %data, i64 0, i32 1, i64 %iv
+ %gep.1 = getelementptr inbounds %s1, ptr %data, i64 0, i32 1, i64 %iv
br label %loop.latch
if.else: ; preds = %loop.header
- %gep.2 = getelementptr inbounds %s1, %s1* %data, i64 0, i32 2, i64 %iv
+ %gep.2 = getelementptr inbounds %s1, ptr %data, i64 0, i32 2, i64 %iv
br label %loop.latch
loop.latch: ; preds = %if.else, %if.then
- %gep.2.sink = phi double* [ %gep.2, %if.else ], [ %gep.1, %if.then ]
- %v8 = load double, double* %arrayidx, align 8
+ %gep.2.sink = phi ptr [ %gep.2, %if.else ], [ %gep.1, %if.then ]
+ %v8 = load double, ptr %arrayidx, align 8
%mul16 = fmul double 3.0, %v8
- store double %mul16, double* %gep.2.sink, align 8
+ store double %mul16, ptr %gep.2.sink, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -72,26 +72,26 @@ exit: ; preds = %loop.latch
ret i32 10
}
-define i32 @store_with_pointer_phi_runtime_checks(double* %A, double* %B, double* %C) {
+define i32 @store_with_pointer_phi_runtime_checks(ptr %A, ptr %B, ptr %C) {
; CHECK-LABEL: 'store_with_pointer_phi_runtime_checks'
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Memory dependences are safe with run-time checks
; CHECK: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group ([[GROUP_B:.+]]):
-; CHECK-NEXT: %gep.1 = getelementptr inbounds double, double* %B, i64 %iv
+; CHECK-NEXT: %gep.1 = getelementptr inbounds double, ptr %B, i64 %iv
; CHECK-NEXT: Against group ([[GROUP_C:.+]]):
-; CHECK-NEXT: %gep.2 = getelementptr inbounds double, double* %C, i64 %iv
+; CHECK-NEXT: %gep.2 = getelementptr inbounds double, ptr %C, i64 %iv
; CHECK-NEXT: Check 1:
; CHECK-NEXT: Comparing group ([[GROUP_B]]):
-; CHECK-NEXT: %gep.1 = getelementptr inbounds double, double* %B, i64 %iv
+; CHECK-NEXT: %gep.1 = getelementptr inbounds double, ptr %B, i64 %iv
; CHECK-NEXT: Against group ([[GROUP_A:.+]]):
-; CHECK-NEXT: %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
+; CHECK-NEXT: %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
; CHECK-NEXT: Check 2:
; CHECK-NEXT: Comparing group ([[GROUP_C]]):
-; CHECK-NEXT: %gep.2 = getelementptr inbounds double, double* %C, i64 %iv
+; CHECK-NEXT: %gep.2 = getelementptr inbounds double, ptr %C, i64 %iv
; CHECK-NEXT: Against group ([[GROUP_A]]):
-; CHECK-NEXT: %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
+; CHECK-NEXT: %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
;
entry:
br label %loop.header
@@ -100,22 +100,22 @@ loop.header: ; preds = %loop.latch, %entr
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
%iv.next = add nuw nsw i64 %iv, 1
%cmp5 = icmp ult i64 %iv, 15999
- %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
br i1 %cmp5, label %if.then, label %if.else
if.then: ; preds = %loop.header
- %gep.1 = getelementptr inbounds double, double* %B, i64 %iv
+ %gep.1 = getelementptr inbounds double, ptr %B, i64 %iv
br label %loop.latch
if.else: ; preds = %loop.header
- %gep.2 = getelementptr inbounds double, double* %C, i64 %iv
+ %gep.2 = getelementptr inbounds double, ptr %C, i64 %iv
br label %loop.latch
loop.latch: ; preds = %if.else, %if.then
- %gep.2.sink = phi double* [ %gep.2, %if.else ], [ %gep.1, %if.then ]
- %v8 = load double, double* %arrayidx, align 8
+ %gep.2.sink = phi ptr [ %gep.2, %if.else ], [ %gep.1, %if.then ]
+ %v8 = load double, ptr %arrayidx, align 8
%mul16 = fmul double 3.0, %v8
- store double %mul16, double* %gep.2.sink, align 8
+ store double %mul16, ptr %gep.2.sink, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -123,15 +123,15 @@ exit: ; preds = %loop.latch
ret i32 10
}
-define i32 @load_with_pointer_phi_outside_loop(double* %A, double* %B, double* %C, i1 %c.0, i1 %c.1) {
+define i32 @load_with_pointer_phi_outside_loop(ptr %A, ptr %B, ptr %C, i1 %c.0, i1 %c.1) {
; CHECK-LABEL: 'load_with_pointer_phi_outside_loop'
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %v8 = load double, double* %ptr, align 8 ->
-; CHECK-NEXT: store double %mul16, double* %arrayidx, align 8
+; CHECK-NEXT: %v8 = load double, ptr %ptr, align 8 ->
+; CHECK-NEXT: store double %mul16, ptr %arrayidx, align 8
;
entry:
br i1 %c.0, label %if.then, label %if.else
@@ -140,20 +140,20 @@ if.then:
br label %loop.ph
if.else:
- %ptr.select = select i1 %c.1, double* %C, double* %B
+ %ptr.select = select i1 %c.1, ptr %C, ptr %B
br label %loop.ph
loop.ph:
- %ptr = phi double* [ %A, %if.then ], [ %ptr.select, %if.else ]
+ %ptr = phi ptr [ %A, %if.then ], [ %ptr.select, %if.else ]
br label %loop.header
loop.header: ; preds = %loop.latch, %entry
%iv = phi i64 [ 0, %loop.ph ], [ %iv.next, %loop.header ]
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
- %v8 = load double, double* %ptr, align 8
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+ %v8 = load double, ptr %ptr, align 8
%mul16 = fmul double 3.0, %v8
- store double %mul16, double* %arrayidx, align 8
+ store double %mul16, ptr %arrayidx, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -161,15 +161,15 @@ exit: ; preds = %loop.latch
ret i32 10
}
-define i32 @store_with_pointer_phi_outside_loop(double* %A, double* %B, double* %C, i1 %c.0, i1 %c.1) {
+define i32 @store_with_pointer_phi_outside_loop(ptr %A, ptr %B, ptr %C, i1 %c.0, i1 %c.1) {
; CHECK-LABEL: 'store_with_pointer_phi_outside_loop'
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %v8 = load double, double* %arrayidx, align 8 ->
-; CHECK-NEXT: store double %mul16, double* %ptr, align 8
+; CHECK-NEXT: %v8 = load double, ptr %arrayidx, align 8 ->
+; CHECK-NEXT: store double %mul16, ptr %ptr, align 8
;
entry:
br i1 %c.0, label %if.then, label %if.else
@@ -178,20 +178,20 @@ if.then:
br label %loop.ph
if.else:
- %ptr.select = select i1 %c.1, double* %C, double* %B
+ %ptr.select = select i1 %c.1, ptr %C, ptr %B
br label %loop.ph
loop.ph:
- %ptr = phi double* [ %A, %if.then ], [ %ptr.select, %if.else ]
+ %ptr = phi ptr [ %A, %if.then ], [ %ptr.select, %if.else ]
br label %loop.header
loop.header: ; preds = %loop.latch, %entry
%iv = phi i64 [ 0, %loop.ph ], [ %iv.next, %loop.header ]
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
- %v8 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+ %v8 = load double, ptr %arrayidx, align 8
%mul16 = fmul double 3.0, %v8
- store double %mul16, double* %ptr, align 8
+ store double %mul16, ptr %ptr, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -199,34 +199,34 @@ exit: ; preds = %loop.latch
ret i32 10
}
-define i32 @store_with_pointer_phi_incoming_phi(double* %A, double* %B, double* %C, i1 %c.0, i1 %c.1) {
+define i32 @store_with_pointer_phi_incoming_phi(ptr %A, ptr %B, ptr %C, i1 %c.0, i1 %c.1) {
; CHECK-LABEL: 'store_with_pointer_phi_incoming_phi'
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %v8 = load double, double* %arrayidx, align 8 ->
-; CHECK-NEXT: store double %mul16, double* %ptr.2, align 8
+; CHECK-NEXT: %v8 = load double, ptr %arrayidx, align 8 ->
+; CHECK-NEXT: store double %mul16, ptr %ptr.2, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group ([[GROUP_C:.+]]):
-; CHECK-NEXT: double* %C
+; CHECK-NEXT: ptr %C
; CHECK-NEXT: Against group ([[GROUP_B:.+]]):
-; CHECK-NEXT: double* %B
+; CHECK-NEXT: ptr %B
; CHECK-NEXT: Check 1:
; CHECK-NEXT: Comparing group ([[GROUP_C]]):
-; CHECK-NEXT: double* %C
+; CHECK-NEXT: ptr %C
; CHECK-NEXT: Against group ([[GROUP_A:.+]]):
-; CHECK-NEXT: %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
-; CHECK-NEXT: double* %A
+; CHECK-NEXT: %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+; CHECK-NEXT: ptr %A
; CHECK-NEXT: Check 2:
; CHECK-NEXT: Comparing group ([[GROUP_B]]):
-; CHECK-NEXT: double* %B
+; CHECK-NEXT: ptr %B
; CHECK-NEXT: Against group ([[GROUP_A]]):
-; CHECK-NEXT: %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
-; CHECK-NEXT: double* %A
+; CHECK-NEXT: %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+; CHECK-NEXT: ptr %A
; CHECK-NEXT: Grouped accesses:
; CHECK-NEXT: Group [[GROUP_C]]:
; CHECK-NEXT: (Low: %C High: (8 + %C))
@@ -245,8 +245,8 @@ entry:
loop.header: ; preds = %loop.latch, %entry
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
- %v8 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+ %v8 = load double, ptr %arrayidx, align 8
%mul16 = fmul double 3.0, %v8
br i1 %c.0, label %loop.then, label %loop.latch
@@ -261,13 +261,13 @@ loop.else.2:
merge.2:
- %ptr = phi double* [ %A, %loop.then.2 ], [ %B, %loop.else.2 ]
+ %ptr = phi ptr [ %A, %loop.then.2 ], [ %B, %loop.else.2 ]
br label %loop.latch
loop.latch:
- %ptr.2 = phi double* [ %ptr, %merge.2], [ %C, %loop.header ]
- store double %mul16, double* %ptr.2, align 8
+ %ptr.2 = phi ptr [ %ptr, %merge.2], [ %C, %loop.header ]
+ store double %mul16, ptr %ptr.2, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -276,34 +276,34 @@ exit: ; preds = %loop.latch
}
; Test cases with pointer phis forming a cycle.
-define i32 @store_with_pointer_phi_incoming_phi_irreducible_cycle(double* %A, double* %B, double* %C, i1 %c.0, i1 %c.1) {
+define i32 @store_with_pointer_phi_incoming_phi_irreducible_cycle(ptr %A, ptr %B, ptr %C, i1 %c.0, i1 %c.1) {
; CHECK-LABEL: 'store_with_pointer_phi_incoming_phi_irreducible_cycle'
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %v8 = load double, double* %arrayidx, align 8 ->
-; CHECK-NEXT: store double %mul16, double* %ptr.3, align 8
+; CHECK-NEXT: %v8 = load double, ptr %arrayidx, align 8 ->
+; CHECK-NEXT: store double %mul16, ptr %ptr.3, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group ([[GROUP_C:.+]]):
-; CHECK-NEXT: double* %C
+; CHECK-NEXT: ptr %C
; CHECK-NEXT: Against group ([[GROUP_B:.+]]):
-; CHECK-NEXT: double* %B
+; CHECK-NEXT: ptr %B
; CHECK-NEXT: Check 1:
; CHECK-NEXT: Comparing group ([[GROUP_C]]):
-; CHECK-NEXT: double* %C
+; CHECK-NEXT: ptr %C
; CHECK-NEXT: Against group ([[GROUP_A:.+]]):
-; CHECK-NEXT: %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
-; CHECK-NEXT: double* %A
+; CHECK-NEXT: %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+; CHECK-NEXT: ptr %A
; CHECK-NEXT: Check 2:
; CHECK-NEXT: Comparing group ([[GROUP_B]]):
-; CHECK-NEXT: double* %B
+; CHECK-NEXT: ptr %B
; CHECK-NEXT: Against group ([[GROUP_A]]):
-; CHECK-NEXT: %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
-; CHECK-NEXT: double* %A
+; CHECK-NEXT: %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+; CHECK-NEXT: ptr %A
; CHECK-NEXT: Grouped accesses:
; CHECK-NEXT: Group [[GROUP_C]]
; CHECK-NEXT: (Low: %C High: (8 + %C))
@@ -322,8 +322,8 @@ entry:
loop.header: ; preds = %loop.latch, %entry
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
- %v8 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+ %v8 = load double, ptr %arrayidx, align 8
%mul16 = fmul double 3.0, %v8
br i1 %c.0, label %loop.then, label %loop.latch
@@ -331,16 +331,16 @@ loop.then:
br i1 %c.0, label %BB.A, label %BB.B
BB.A:
- %ptr = phi double* [ %A, %loop.then ], [ %ptr.2, %BB.B ]
+ %ptr = phi ptr [ %A, %loop.then ], [ %ptr.2, %BB.B ]
br label %BB.B
BB.B:
- %ptr.2 = phi double* [ %ptr, %BB.A ], [ %B, %loop.then ]
+ %ptr.2 = phi ptr [ %ptr, %BB.A ], [ %B, %loop.then ]
br i1 %c.1, label %loop.latch, label %BB.A
loop.latch:
- %ptr.3 = phi double* [ %ptr.2, %BB.B ], [ %C, %loop.header ]
- store double %mul16, double* %ptr.3, align 8
+ %ptr.3 = phi ptr [ %ptr.2, %BB.B ], [ %C, %loop.header ]
+ store double %mul16, ptr %ptr.3, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -348,15 +348,15 @@ exit: ; preds = %loop.latch
ret i32 10
}
-define i32 @store_with_pointer_phi_outside_loop_select(double* %A, double* %B, double* %C, i1 %c.0, i1 %c.1) {
+define i32 @store_with_pointer_phi_outside_loop_select(ptr %A, ptr %B, ptr %C, i1 %c.0, i1 %c.1) {
; CHECK-LABEL: 'store_with_pointer_phi_outside_loop_select'
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %v8 = load double, double* %arrayidx, align 8 ->
-; CHECK-NEXT: store double %mul16, double* %ptr, align 8
+; CHECK-NEXT: %v8 = load double, ptr %arrayidx, align 8 ->
+; CHECK-NEXT: store double %mul16, ptr %ptr, align 8
;
entry:
br i1 %c.0, label %if.then, label %if.else
@@ -365,20 +365,20 @@ if.then:
br label %loop.ph
if.else:
- %ptr.select = select i1 %c.1, double* %C, double* %B
+ %ptr.select = select i1 %c.1, ptr %C, ptr %B
br label %loop.ph
loop.ph:
- %ptr = phi double* [ %A, %if.then ], [ %ptr.select, %if.else ]
+ %ptr = phi ptr [ %A, %if.then ], [ %ptr.select, %if.else ]
br label %loop.header
loop.header: ; preds = %loop.latch, %entry
%iv = phi i64 [ 0, %loop.ph ], [ %iv.next, %loop.header ]
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
- %v8 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+ %v8 = load double, ptr %arrayidx, align 8
%mul16 = fmul double 3.0, %v8
- store double %mul16, double* %ptr, align 8
+ store double %mul16, ptr %ptr, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -386,7 +386,7 @@ exit: ; preds = %loop.latch
ret i32 10
}
-define i32 @store_with_pointer_phi_in_same_bb_use_other_phi(double* %A, double* %B, double* %C, double* %D, i1 %c.0, i1 %c.1) {
+define i32 @store_with_pointer_phi_in_same_bb_use_other_phi(ptr %A, ptr %B, ptr %C, ptr %D, i1 %c.0, i1 %c.1) {
; CHECK-LABEL: Loop access info in function 'store_with_pointer_phi_in_same_bb_use_other_phi':
; CHECK-NEXT: loop.header:
; CHECK-NEXT: Report: cannot identify array bounds
@@ -399,14 +399,14 @@ entry:
br label %loop.header
loop.header: ; preds = %loop.latch, %entry
- %ptr.0 = phi double* [ %C, %entry ], [ %D, %loop.header ]
- %ptr.1 = phi double* [ %B, %entry ], [ %ptr.0, %loop.header ]
+ %ptr.0 = phi ptr [ %C, %entry ], [ %D, %loop.header ]
+ %ptr.1 = phi ptr [ %B, %entry ], [ %ptr.0, %loop.header ]
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.header ]
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidx = getelementptr inbounds double, double* %A, i64 %iv
- %v8 = load double, double* %arrayidx, align 8
+ %arrayidx = getelementptr inbounds double, ptr %A, i64 %iv
+ %v8 = load double, ptr %arrayidx, align 8
%mul16 = fmul double 3.0, %v8
- store double %mul16, double* %ptr.1, align 8
+ store double %mul16, ptr %ptr.1, align 8
%exitcond.not = icmp eq i64 %iv.next, 32000
br i1 %exitcond.not, label %exit, label %loop.header
@@ -414,50 +414,50 @@ exit: ; preds = %loop.latch
ret i32 10
}
-define void @phi_load_store_memdep_check(i1 %c, i16* %A, i16* %B, i16* %C) {
+define void @phi_load_store_memdep_check(i1 %c, ptr %A, ptr %B, ptr %C) {
; CHECK-LABEL: Loop access info in function 'phi_load_store_memdep_check':
; CHECK-NEXT: for.body:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %lv3 = load i16, i16* %c.sink, align 2 ->
-; CHECK-NEXT: store i16 %add, i16* %c.sink, align 1
+; CHECK-NEXT: %lv3 = load i16, ptr %c.sink, align 2 ->
+; CHECK-NEXT: store i16 %add, ptr %c.sink, align 1
; CHECK-EMPTY:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %lv3 = load i16, i16* %c.sink, align 2 ->
-; CHECK-NEXT: store i16 %add, i16* %c.sink, align 1
+; CHECK-NEXT: %lv3 = load i16, ptr %c.sink, align 2 ->
+; CHECK-NEXT: store i16 %add, ptr %c.sink, align 1
; CHECK-EMPTY:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %lv = load i16, i16* %A, align 1 ->
-; CHECK-NEXT: store i16 %lv, i16* %A, align 1
+; CHECK-NEXT: %lv = load i16, ptr %A, align 1 ->
+; CHECK-NEXT: store i16 %lv, ptr %A, align 1
; CHECK-EMPTY:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: store i16 %lv, i16* %A, align 1 ->
-; CHECK-NEXT: %lv2 = load i16, i16* %A, align 1
+; CHECK-NEXT: store i16 %lv, ptr %A, align 1 ->
+; CHECK-NEXT: %lv2 = load i16, ptr %A, align 1
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Check 0:
; CHECK-NEXT: Comparing group ([[GROUP_A:.+]]):
-; CHECK-NEXT: i16* %A
-; CHECK-NEXT: i16* %A
+; CHECK-NEXT: ptr %A
+; CHECK-NEXT: ptr %A
; CHECK-NEXT: Against group ([[GROUP_C:.+]]):
-; CHECK-NEXT: i16* %C
-; CHECK-NEXT: i16* %C
+; CHECK-NEXT: ptr %C
+; CHECK-NEXT: ptr %C
; CHECK-NEXT: Check 1:
; CHECK-NEXT: Comparing group ([[GROUP_A]]):
-; CHECK-NEXT: i16* %A
-; CHECK-NEXT: i16* %A
+; CHECK-NEXT: ptr %A
+; CHECK-NEXT: ptr %A
; CHECK-NEXT: Against group ([[GROUP_B:.+]]):
-; CHECK-NEXT: i16* %B
-; CHECK-NEXT: i16* %B
+; CHECK-NEXT: ptr %B
+; CHECK-NEXT: ptr %B
; CHECK-NEXT: Check 2:
; CHECK-NEXT: Comparing group ([[GROUP_C]]):
-; CHECK-NEXT: i16* %C
-; CHECK-NEXT: i16* %C
+; CHECK-NEXT: ptr %C
+; CHECK-NEXT: ptr %C
; CHECK-NEXT: Against group ([[GROUP_B]]):
-; CHECK-NEXT: i16* %B
-; CHECK-NEXT: i16* %B
+; CHECK-NEXT: ptr %B
+; CHECK-NEXT: ptr %B
; CHECK-NEXT: Grouped accesses:
; CHECK-NEXT: Group [[GROUP_A]]
; CHECK-NEXT: (Low: %A High: (2 + %A))
@@ -478,19 +478,19 @@ entry:
for.body: ; preds = %if.end, %entry
%iv = phi i16 [ 0, %entry ], [ %iv.next, %if.end ]
- %lv = load i16, i16* %A, align 1
- store i16 %lv, i16* %A, align 1
+ %lv = load i16, ptr %A, align 1
+ store i16 %lv, ptr %A, align 1
br i1 %c, label %if.then, label %if.end
if.then: ; preds = %for.body
- %lv2 = load i16, i16* %A, align 1
+ %lv2 = load i16, ptr %A, align 1
br label %if.end
if.end: ; preds = %if.then, %for.body
- %c.sink = phi i16* [ %B, %if.then ], [ %C, %for.body ]
- %lv3 = load i16, i16* %c.sink
+ %c.sink = phi ptr [ %B, %if.then ], [ %C, %for.body ]
+ %lv3 = load i16, ptr %c.sink
%add = add i16 %lv3, 10
- store i16 %add, i16* %c.sink, align 1
+ store i16 %add, ptr %c.sink, align 1
%iv.next = add nuw nsw i16 %iv, 1
%tobool.not = icmp eq i16 %iv.next, 1000
br i1 %tobool.not, label %for.end.loopexit, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll b/llvm/test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll
index f35c2813ad87..546a75cf4efd 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll
@@ -16,10 +16,10 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %loadA = load i16, i16* %arrayidxA, align 2 ->
-; CHECK-NEXT: store i16 %mul, i16* %arrayidxA, align 2
+; CHECK-NEXT: %loadA = load i16, ptr %arrayidxA, align 2 ->
+; CHECK-NEXT: store i16 %mul, ptr %arrayidxA, align 2
-define void @addrec_squared(i16* %a) {
+define void @addrec_squared(ptr %a) {
entry:
br label %for.body
@@ -28,12 +28,12 @@ for.body: ; preds = %for.body, %entry
%access_ind = mul i64 %ind, %ind
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %access_ind
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %access_ind
+ %loadA = load i16, ptr %arrayidxA, align 2
%mul = mul i16 %loadA, 2
- store i16 %mul, i16* %arrayidxA, align 2
+ store i16 %mul, ptr %arrayidxA, align 2
%add = add nuw nsw i64 %ind, 1
%exitcond = icmp eq i64 %add, 20
@@ -46,7 +46,7 @@ for.end: ; preds = %for.body
; TODO: We cannot compute the bound for %arrayidxA_ub, because the index is
; loaded on each iteration. As %a and %b are no-alias, no memchecks are required
; and unknown bounds should not prevent further analysis.
-define void @loaded_bound(i16* noalias %a, i16* noalias %b) {
+define void @loaded_bound(ptr noalias %a, ptr noalias %b) {
; CHECK-LABEL: loaded_bound
; CHECK-NEXT: for.body:
; CHECK-NEXT: Report: cannot identify array bounds
@@ -61,16 +61,16 @@ for.body: ; preds = %for.body, %entry
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %iv
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %iv
+ %loadB = load i16, ptr %arrayidxB, align 2
- %arrayidxA_ub = getelementptr inbounds i16, i16* %a, i16 %loadB
- %loadA_ub = load i16, i16* %arrayidxA_ub, align 2
+ %arrayidxA_ub = getelementptr inbounds i16, ptr %a, i16 %loadB
+ %loadA_ub = load i16, ptr %arrayidxA_ub, align 2
%mul = mul i16 %loadB, %loadA_ub
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %iv
- store i16 %mul, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %iv
+ store i16 %mul, ptr %arrayidxA, align 2
%exitcond = icmp eq i64 %iv, 20
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pr31098.ll b/llvm/test/Analysis/LoopAccessAnalysis/pr31098.ll
index da4711e6701e..408d75ff8442 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/pr31098.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pr31098.ll
@@ -58,7 +58,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
%class.Complex = type { float, float }
-define void @Test(%class.Complex* nocapture %out, i64 %size) local_unnamed_addr {
+define void @Test(ptr nocapture %out, i64 %size) local_unnamed_addr {
entry:
%div = lshr i64 %size, 1
%cmp47 = icmp eq i64 %div, 0
@@ -75,23 +75,23 @@ for.cond.cleanup:
for.body:
%offset.048 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
- %0 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 0
- %1 = load float, float* %0, align 4
- %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 1
- %2 = load float, float* %imaginary_.i.i, align 4
+ %0 = getelementptr inbounds %class.Complex, ptr %out, i64 %offset.048, i32 0
+ %1 = load float, ptr %0, align 4
+ %imaginary_.i.i = getelementptr inbounds %class.Complex, ptr %out, i64 %offset.048, i32 1
+ %2 = load float, ptr %imaginary_.i.i, align 4
%add = add nuw i64 %offset.048, %div
- %3 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 0
- %4 = load float, float* %3, align 4
- %imaginary_.i.i28 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 1
- %5 = load float, float* %imaginary_.i.i28, align 4
+ %3 = getelementptr inbounds %class.Complex, ptr %out, i64 %add, i32 0
+ %4 = load float, ptr %3, align 4
+ %imaginary_.i.i28 = getelementptr inbounds %class.Complex, ptr %out, i64 %add, i32 1
+ %5 = load float, ptr %imaginary_.i.i28, align 4
%add.i = fadd fast float %4, %1
%add4.i = fadd fast float %5, %2
- store float %add.i, float* %0, align 4
- store float %add4.i, float* %imaginary_.i.i, align 4
+ store float %add.i, ptr %0, align 4
+ store float %add4.i, ptr %imaginary_.i.i, align 4
%sub.i = fsub fast float %1, %4
%sub4.i = fsub fast float %2, %5
- store float %sub.i, float* %3, align 4
- store float %sub4.i, float* %imaginary_.i.i28, align 4
+ store float %sub.i, ptr %3, align 4
+ store float %sub4.i, ptr %imaginary_.i.i28, align 4
%inc = add nuw nsw i64 %offset.048, 1
%exitcond = icmp eq i64 %inc, %div
br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pr56672.ll b/llvm/test/Analysis/LoopAccessAnalysis/pr56672.ll
index 92887feb6395..a1773ad2c980 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/pr56672.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pr56672.ll
@@ -6,7 +6,7 @@
; recomputation of LI produces loop with different blocks order, and LA gives
; a different result for it. The reason of this bug hasn't been found yet, but
; the algorithm is somehow dependent on blocks order.
-define void @test_01(i32* %p) {
+define void @test_01(ptr %p) {
; CHECK-LABEL: test_01
; CHECK: Report: unsafe dependent memory operations in loop.
; CHECK-NOT: Memory dependences are safe
@@ -17,7 +17,7 @@ loop.progress: ; preds = %loop
br label %loop.backedge
loop.backedge: ; preds = %loop.progress
- store i32 1, i32* %tmp7, align 4
+ store i32 1, ptr %tmp7, align 4
%tmp = add nuw i64 %tmp5, 1
%tmp3 = icmp ult i64 %tmp, 1000
br i1 %tmp3, label %loop, label %exit
@@ -25,10 +25,10 @@ loop.backedge: ; preds = %loop.prog
loop: ; preds = %loop.backedge, %entry
%tmp5 = phi i64 [ %tmp, %loop.backedge ], [ 16, %entry ]
%tmp6 = phi i64 [ %tmp5, %loop.backedge ], [ 15, %entry ]
- %tmp7 = getelementptr inbounds i32, i32* %p, i64 %tmp5
- %tmp8 = load i32, i32* %tmp7, align 4
+ %tmp7 = getelementptr inbounds i32, ptr %p, i64 %tmp5
+ %tmp8 = load i32, ptr %tmp7, align 4
%tmp9 = add i32 %tmp8, -5
- store i32 %tmp9, i32* %tmp7, align 4
+ store i32 %tmp9, ptr %tmp7, align 4
br i1 false, label %never, label %loop.progress
never: ; preds = %loop
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll b/llvm/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll
index dc7cb08c81b0..b5ba85c6e3fa 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll
@@ -16,39 +16,39 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: 0:
; CHECK-NEXT: Comparing group
-; CHECK-NEXT: %arrayidxA2 = getelementptr inbounds i16, i16* %a, i64 %idx
+; CHECK-NEXT: %arrayidxA2 = getelementptr inbounds i16, ptr %a, i64 %idx
; CHECK-NEXT: Against group
-; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %indvar
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %indvar
-@B = common global i16* null, align 8
-@A = common global i16* null, align 8
-@C = common global i16* null, align 8
+@B = common global ptr null, align 8
+@A = common global ptr null, align 8
+@C = common global ptr null, align 8
define void @f(i64 %offset) {
entry:
- %a = load i16*, i16** @A, align 8
- %b = load i16*, i16** @B, align 8
- %c = load i16*, i16** @C, align 8
+ %a = load ptr, ptr @A, align 8
+ %b = load ptr, ptr @B, align 8
+ %c = load ptr, ptr @C, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%indvar = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %indvar
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %indvar
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %indvar
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %indvar
+ %loadB = load i16, ptr %arrayidxB, align 2
- %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %indvar
- %loadC = load i16, i16* %arrayidxC, align 2
+ %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %indvar
+ %loadC = load i16, ptr %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA
%mul1 = mul i16 %mul, %loadC
%idx = add i64 %indvar, %offset
- %arrayidxA2 = getelementptr inbounds i16, i16* %a, i64 %idx
- store i16 %mul1, i16* %arrayidxA2, align 2
+ %arrayidxA2 = getelementptr inbounds i16, ptr %a, i64 %idx
+ store i16 %mul1, ptr %arrayidxA2, align 2
%add = add nuw nsw i64 %indvar, 1
%exitcond = icmp eq i64 %add, 20
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll b/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
index e68861e95192..86395eea96f5 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
@@ -17,28 +17,28 @@ target triple = "aarch64--linux-gnueabi"
; CHECK: function 'f':
; CHECK: (Low: (20000 + %a)<nuw> High: (60004 + %a))
-@B = common global i32* null, align 8
-@A = common global i32* null, align 8
+@B = common global ptr null, align 8
+@A = common global ptr null, align 8
define void @f() {
entry:
- %a = load i32*, i32** @A, align 8
- %b = load i32*, i32** @B, align 8
+ %a = load ptr, ptr @A, align 8
+ %b = load ptr, ptr @B, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
%negidx = sub i64 15000, %idx
- %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
- %loadA0 = load i32, i32* %arrayidxA0, align 2
+ %arrayidxA0 = getelementptr inbounds i32, ptr %a, i64 %negidx
+ %loadA0 = load i32, ptr %arrayidxA0, align 2
%res = mul i32 %loadA0, 3
%add = add nuw nsw i64 %idx, 1
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
- store i32 %res, i32* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %idx
+ store i32 %res, ptr %arrayidxB, align 2
%exitcond = icmp eq i64 %idx, 10000
br i1 %exitcond, label %for.end, label %for.body
@@ -52,7 +52,7 @@ for.end: ; preds = %for.body
; the interval limits.
; for (i = 0; i < 10000; i++) {
; B[i] = A[15000 - step * i] * 3;
; }
; Here it is not obvious what the limits are, since 'step' could be negative.
@@ -62,8 +62,8 @@ for.end: ; preds = %for.body
define void @g(i64 %step) {
entry:
- %a = load i32*, i32** @A, align 8
- %b = load i32*, i32** @B, align 8
+ %a = load ptr, ptr @A, align 8
+ %b = load ptr, ptr @B, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -71,15 +71,15 @@ for.body: ; preds = %for.body, %entry
%idx_mul = mul i64 %idx, %step
%negidx = sub i64 15000, %idx_mul
- %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
- %loadA0 = load i32, i32* %arrayidxA0, align 2
+ %arrayidxA0 = getelementptr inbounds i32, ptr %a, i64 %negidx
+ %loadA0 = load i32, ptr %arrayidxA0, align 2
%res = mul i32 %loadA0, 3
%add = add nuw nsw i64 %idx, 1
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
- store i32 %res, i32* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %idx
+ store i32 %res, ptr %arrayidxB, align 2
%exitcond = icmp eq i64 %idx, 10000
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/runtime-pointer-checking-insert-typesize.ll b/llvm/test/Analysis/LoopAccessAnalysis/runtime-pointer-checking-insert-typesize.ll
index 3d6cd3e0e270..1ddcc9b93607 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/runtime-pointer-checking-insert-typesize.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/runtime-pointer-checking-insert-typesize.ll
@@ -5,16 +5,16 @@
; in RuntimePointerChecking::insert when performing loop load elimination
; because this function was previously unaware of scalable types.
-define void @runtime_pointer_checking_insert_typesize(<vscale x 4 x i32>* %a,
- <vscale x 4 x i32>* %b) {
+define void @runtime_pointer_checking_insert_typesize(ptr %a,
+ ptr %b) {
entry:
br label %loop.body
loop.body:
%0 = phi i64 [ 0, %entry ], [%1, %loop.body]
- %idx_a = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 %0
- %idx_b = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %b, i64 %0
- %tmp = load <vscale x 4 x i32>, <vscale x 4 x i32>* %idx_a
- store <vscale x 4 x i32> %tmp, <vscale x 4 x i32>* %idx_b
+ %idx_a = getelementptr <vscale x 4 x i32>, ptr %a, i64 %0
+ %idx_b = getelementptr <vscale x 4 x i32>, ptr %b, i64 %0
+ %tmp = load <vscale x 4 x i32>, ptr %idx_a
+ store <vscale x 4 x i32> %tmp, ptr %idx_b
%1 = add i64 %0, 2
%2 = icmp eq i64 %1, 1024
br i1 %2, label %loop.end, label %loop.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/safe-no-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/safe-no-checks.ll
index 74cbe97ca760..fc00d68a8c02 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/safe-no-checks.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/safe-no-checks.ll
@@ -2,7 +2,7 @@
; If the arrays don't alias this loop is safe with no memchecks:
; for (i = 0; i < n; i++)
; A[i] = A[i+1] * B[i] * C[i];
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
@@ -13,13 +13,13 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK: Memory dependences are safe{{$}}
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Forward:
-; CHECK-NEXT: %loadA_plus_2 = load i16, i16* %arrayidxA_plus_2, align 2 ->
-; CHECK-NEXT: store i16 %mul1, i16* %arrayidxA, align 2
+; CHECK-NEXT: %loadA_plus_2 = load i16, ptr %arrayidxA_plus_2, align 2 ->
+; CHECK-NEXT: store i16 %mul1, ptr %arrayidxA, align 2
-define void @f(i16* noalias %a,
- i16* noalias %b,
- i16* noalias %c) {
+define void @f(ptr noalias %a,
+ ptr noalias %b,
+ ptr noalias %c) {
entry:
br label %for.body
@@ -28,20 +28,20 @@ for.body: ; preds = %for.body, %entry
%add = add nuw nsw i64 %ind, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
- %loadA_plus_2 = load i16, i16* %arrayidxA_plus_2, align 2
+ %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %add
+ %loadA_plus_2 = load i16, ptr %arrayidxA_plus_2, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
- %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %ind
- %loadC = load i16, i16* %arrayidxC, align 2
+ %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %ind
+ %loadC = load i16, ptr %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA_plus_2
%mul1 = mul i16 %mul, %loadC
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %ind
- store i16 %mul1, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %ind
+ store i16 %mul1, ptr %arrayidxA, align 2
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/safe-with-dep-distance.ll b/llvm/test/Analysis/LoopAccessAnalysis/safe-with-dep-distance.ll
index 5e3663e13178..23fdcd5d2a1d 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/safe-with-dep-distance.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/safe-with-dep-distance.ll
@@ -9,24 +9,24 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
-@A = common global i16* null, align 8
+@A = common global ptr null, align 8
define void @f() {
entry:
- %a = load i16*, i16** @A, align 8
+ %a = load ptr, ptr @A, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %ind
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %ind
+ %loadA = load i16, ptr %arrayidxA, align 2
%mul = mul i16 %loadA, 2
%next = add nuw nsw i64 %ind, 4
- %arrayidxA_next = getelementptr inbounds i16, i16* %a, i64 %next
- store i16 %mul, i16* %arrayidxA_next, align 2
+ %arrayidxA_next = getelementptr inbounds i16, ptr %a, i64 %next
+ store i16 %mul, ptr %arrayidxA_next, align 2
%add = add nuw nsw i64 %ind, 1
%exitcond = icmp eq i64 %add, 20
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/scalable-vector-regression-tests.ll b/llvm/test/Analysis/LoopAccessAnalysis/scalable-vector-regression-tests.ll
index 995bc66da69d..82a884a63725 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/scalable-vector-regression-tests.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/scalable-vector-regression-tests.ll
@@ -8,14 +8,14 @@
; No output checked for this one, but causes a fatal error if the regression is present.
-define void @regression_test_get_gep_induction_operand_typesize_warning(i64 %n, <vscale x 4 x i32>* %a) {
+define void @regression_test_get_gep_induction_operand_typesize_warning(i64 %n, ptr %a) {
entry:
br label %loop.body
loop.body:
%0 = phi i64 [ 0, %entry ], [ %1, %loop.body ]
- %idx = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 %0
- store <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %idx
+ %idx = getelementptr <vscale x 4 x i32>, ptr %a, i64 %0
+ store <vscale x 4 x i32> zeroinitializer, ptr %idx
%1 = add i64 %0, 1
%2 = icmp eq i64 %1, %n
br i1 %2, label %loop.end, label %loop.body
@@ -26,15 +26,15 @@ loop.end:
; CHECK-LABEL: LAA: Found a loop in regression_test_loop_access_scalable_typesize
; CHECK: LAA: Bad stride - Scalable object:
-define void @regression_test_loop_access_scalable_typesize(<vscale x 16 x i8>* %input_ptr) {
+define void @regression_test_loop_access_scalable_typesize(ptr %input_ptr) {
entry:
br label %vector.body
vector.body:
- %ind_ptr = phi <vscale x 16 x i8>* [ %next_ptr, %vector.body ], [ %input_ptr, %entry ]
+ %ind_ptr = phi ptr [ %next_ptr, %vector.body ], [ %input_ptr, %entry ]
%ind = phi i64 [ %next, %vector.body ], [ 0, %entry ]
- %ld = load <vscale x 16 x i8>, <vscale x 16 x i8>* %ind_ptr, align 16
- store <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8>* %ind_ptr, align 16
- %next_ptr = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %ind_ptr, i64 1
+ %ld = load <vscale x 16 x i8>, ptr %ind_ptr, align 16
+ store <vscale x 16 x i8> zeroinitializer, ptr %ind_ptr, align 16
+ %next_ptr = getelementptr inbounds <vscale x 16 x i8>, ptr %ind_ptr, i64 1
%next = add i64 %ind, 1
%cond = icmp ult i64 %next, 1024
br i1 %cond, label %end, label %vector.body
@@ -44,16 +44,15 @@ end:
; CHECK-LABEL: LAA: Found a loop in regression_test_loop_access_scalable_typesize_nonscalable_object
; CHECK: LAA: Bad stride - Scalable object:
-define void @regression_test_loop_access_scalable_typesize_nonscalable_object(i8* %input_ptr) {
+define void @regression_test_loop_access_scalable_typesize_nonscalable_object(ptr %input_ptr) {
entry:
br label %vector.body
vector.body:
- %ind_ptr = phi i8* [ %next_ptr, %vector.body ], [ %input_ptr, %entry ]
+ %ind_ptr = phi ptr [ %next_ptr, %vector.body ], [ %input_ptr, %entry ]
%ind = phi i64 [ %next, %vector.body ], [ 0, %entry ]
- %scalable_ptr = bitcast i8* %ind_ptr to <vscale x 16 x i8>*
- %ld = load <vscale x 16 x i8>, <vscale x 16 x i8>* %scalable_ptr, align 16
- store <vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8>* %scalable_ptr, align 16
- %next_ptr = getelementptr inbounds i8, i8* %ind_ptr, i64 1
+ %ld = load <vscale x 16 x i8>, ptr %ind_ptr, align 16
+ store <vscale x 16 x i8> zeroinitializer, ptr %ind_ptr, align 16
+ %next_ptr = getelementptr inbounds i8, ptr %ind_ptr, i64 1
%next = add i64 %ind, 1
%cond = icmp ult i64 %next, 1024
br i1 %cond, label %end, label %vector.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check1.ll b/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check1.ll
index bf0b932eaa26..5617b93545e4 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check1.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check1.ll
@@ -17,7 +17,7 @@
; CHECK: for.cond1.preheader:
; CHECK: Non vectorizable stores to invariant address were not found in loop.
-define i32 @foo(i32* nocapture %var1, i32* nocapture readonly %var2, i32 %itr) #0 {
+define i32 @foo(ptr nocapture %var1, ptr nocapture readonly %var2, i32 %itr) #0 {
entry:
%cmp20 = icmp eq i32 %itr, 0
br i1 %cmp20, label %for.end10, label %for.cond1.preheader
@@ -29,20 +29,20 @@ for.cond1.preheader: ; preds = %entry, %for.inc8
br i1 %cmp218, label %for.body3.lr.ph, label %for.inc8
for.body3.lr.ph: ; preds = %for.cond1.preheader
- %arrayidx5 = getelementptr inbounds i32, i32* %var1, i64 %indvars.iv23
+ %arrayidx5 = getelementptr inbounds i32, ptr %var1, i64 %indvars.iv23
%0 = zext i32 %j.022 to i64
br label %for.body3
for.body3: ; preds = %for.body3, %for.body3.lr.ph
%indvars.iv = phi i64 [ %0, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx = getelementptr inbounds i32, i32* %var2, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx, align 4
- %2 = load i32, i32* %arrayidx5, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %var2, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx, align 4
+ %2 = load i32, ptr %arrayidx5, align 4
%add = add nsw i32 %2, %1
- store i32 %add, i32* %arrayidx5, align 4
- %3 = load i32, i32* %arrayidx5, align 4
+ store i32 %add, ptr %arrayidx5, align 4
+ %3 = load i32, ptr %arrayidx5, align 4
%4 = add nsw i32 %3, 1
- store i32 %4, i32* %arrayidx5, align 4
+ store i32 %4, ptr %arrayidx5, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %itr
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check2.ll b/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check2.ll
index 5338187eb2d3..d0b08ac978ac 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check2.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check2.ll
@@ -13,7 +13,7 @@
; CHECK-NOT: Non vectorizable stores to invariant address were found in loop.
-define i32 @foo(i32* nocapture readonly %var1, i32* nocapture %var2, i32 %itr) #0 {
+define i32 @foo(ptr nocapture readonly %var1, ptr nocapture %var2, i32 %itr) #0 {
entry:
%cmp20 = icmp eq i32 %itr, 0
br i1 %cmp20, label %for.end10, label %for.cond1.preheader
@@ -25,17 +25,17 @@ for.cond1.preheader: ; preds = %entry, %for.inc8
br i1 %cmp218, label %for.body3.lr.ph, label %for.inc8
for.body3.lr.ph: ; preds = %for.cond1.preheader
- %arrayidx5 = getelementptr inbounds i32, i32* %var1, i64 %indvars.iv23
+ %arrayidx5 = getelementptr inbounds i32, ptr %var1, i64 %indvars.iv23
%0 = zext i32 %j.022 to i64
br label %for.body3
for.body3: ; preds = %for.body3, %for.body3.lr.ph
%indvars.iv = phi i64 [ %0, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx = getelementptr inbounds i32, i32* %var2, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx, align 4
- %2 = load i32, i32* %arrayidx5, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %var2, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx, align 4
+ %2 = load i32, ptr %arrayidx5, align 4
%add = add nsw i32 %2, %1
- store i32 %add, i32* %arrayidx, align 4
+ store i32 %add, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %itr
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check3.ll b/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check3.ll
index 9c5c9e7a828e..d7f8fddbdc3d 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check3.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/store-to-invariant-check3.ll
@@ -11,7 +11,7 @@
; CHECK: Non vectorizable stores to invariant address were not found in loop.
-define void @foo(i32* nocapture %var1, i32* nocapture %var2, i32 %itr) #0 {
+define void @foo(ptr nocapture %var1, ptr nocapture %var2, i32 %itr) #0 {
entry:
%cmp20 = icmp sgt i32 %itr, 0
br i1 %cmp20, label %for.cond1.preheader, label %for.end11
@@ -23,19 +23,19 @@ for.cond1.preheader: ; preds = %entry, %for.inc9
br i1 %cmp218, label %for.body3.lr.ph, label %for.inc9
for.body3.lr.ph: ; preds = %for.cond1.preheader
- %arrayidx = getelementptr inbounds i32, i32* %var2, i64 %indvars.iv23
+ %arrayidx = getelementptr inbounds i32, ptr %var2, i64 %indvars.iv23
%0 = sext i32 %j.022 to i64
br label %for.body3
for.body3: ; preds = %for.body3, %for.body3.lr.ph
%indvars.iv = phi i64 [ %0, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
- %1 = load i32, i32* %arrayidx, align 4
+ %1 = load i32, ptr %arrayidx, align 4
%inc = add nsw i32 %1, 1
- store i32 %inc, i32* %arrayidx, align 4
- %arrayidx5 = getelementptr inbounds i32, i32* %var1, i64 %indvars.iv
- %2 = load i32, i32* %arrayidx5, align 4
+ store i32 %inc, ptr %arrayidx, align 4
+ %arrayidx5 = getelementptr inbounds i32, ptr %var1, i64 %indvars.iv
+ %2 = load i32, ptr %arrayidx5, align 4
%add = add nsw i32 %inc, %2
- store i32 %add, i32* %arrayidx5, align 4
+ store i32 %add, ptr %arrayidx5, align 4
%indvars.iv.next = add nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %itr
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
index bb31c94cc3ac..145117a712ac 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
@@ -16,9 +16,9 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Run-time memory checks:
-define void @nodep_Read_Write(i32* nocapture %A) {
+define void @nodep_Read_Write(ptr nocapture %A) {
entry:
- %add.ptr = getelementptr inbounds i32, i32* %A, i64 1
+ %add.ptr = getelementptr inbounds i32, ptr %A, i64 1
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -26,11 +26,11 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
- store i32 %add, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -52,7 +52,7 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Run-time memory checks:
-define i32 @nodep_Write_Read(i32* nocapture %A) {
+define i32 @nodep_Write_Read(ptr nocapture %A) {
entry:
br label %for.body
@@ -62,12 +62,12 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.013 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
- store i32 %0, i32* %arrayidx, align 4
+ store i32 %0, ptr %arrayidx, align 4
%1 = or i64 %indvars.iv, 3
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %1
- %2 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %1
+ %2 = load i32, ptr %arrayidx2, align 4
%add3 = add nsw i32 %2, %sum.013
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%cmp = icmp ult i64 %indvars.iv.next, 1024
@@ -87,7 +87,7 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Run-time memory checks:
-define void @nodep_Write_Write(i32* nocapture %A) {
+define void @nodep_Write_Write(ptr nocapture %A) {
entry:
br label %for.body
@@ -96,13 +96,13 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
- store i32 %0, i32* %arrayidx, align 4
+ store i32 %0, ptr %arrayidx, align 4
%1 = or i64 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %1
+ %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %1
%2 = trunc i64 %1 to i32
- store i32 %2, i32* %arrayidx3, align 4
+ store i32 %2, ptr %arrayidx3, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -121,10 +121,10 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %0 = load i32, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, i32* %arrayidx3, align 4
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx3, align 4
-define void @unsafe_Read_Write(i32* nocapture %A) {
+define void @unsafe_Read_Write(ptr nocapture %A) {
entry:
br label %for.body
@@ -134,13 +134,13 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%i.010 = phi i32 [ 0, %entry ], [ %add1, %for.body ]
%idxprom = zext i32 %i.010 to i64
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, 1
%add1 = add i32 %i.010, 3
%idxprom2 = zext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %idxprom2
- store i32 %add, i32* %arrayidx3, align 4
+ %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom2
+ store i32 %add, ptr %arrayidx3, align 4
%cmp = icmp ult i32 %add1, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
}
@@ -161,10 +161,10 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: store i32 %0, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: %1 = load i32, i32* %arrayidx2, align 4
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
-define i32 @unsafe_Write_Read(i32* nocapture %A) {
+define i32 @unsafe_Write_Read(ptr nocapture %A) {
entry:
br label %for.body
@@ -174,12 +174,12 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.013 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
- store i32 %0, i32* %arrayidx, align 4
+ store i32 %0, ptr %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
- %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
- %1 = load i32, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+ %1 = load i32, ptr %arrayidx2, align 4
%add3 = add nsw i32 %1, %sum.013
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -198,10 +198,10 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: store i32 %0, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %2, i32* %arrayidx3, align 4
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %2, ptr %arrayidx3, align 4
-define void @unsafe_Write_Write(i32* nocapture %A) {
+define void @unsafe_Write_Write(ptr nocapture %A) {
entry:
br label %for.body
@@ -210,14 +210,14 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
- store i32 %0, i32* %arrayidx, align 4
+ store i32 %0, ptr %arrayidx, align 4
%1 = or i64 %indvars.iv, 1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
- %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
+ %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
%2 = trunc i64 %1 to i32
- store i32 %2, i32* %arrayidx3, align 4
+ store i32 %2, ptr %arrayidx3, align 4
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
}
@@ -235,12 +235,12 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: %0 = load i32, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, i32* %arrayidx2, align 4
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
-define void @vectorizable_Read_Write(i32* nocapture %A) {
+define void @vectorizable_Read_Write(ptr nocapture %A) {
entry:
- %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
+ %add.ptr = getelementptr inbounds i32, ptr %A, i64 4
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -248,11 +248,11 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
%add = add nsw i32 %0, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
- store i32 %add, i32* %arrayidx2, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -274,12 +274,12 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: store i32 %0, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: %1 = load i32, i32* %arrayidx2, align 4
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
-define i32 @vectorizable_Write_Read(i32* nocapture %A) {
+define i32 @vectorizable_Write_Read(ptr nocapture %A) {
entry:
- %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
+ %add.ptr = getelementptr inbounds i32, ptr %A, i64 4
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -288,11 +288,11 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.013 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
- store i32 %0, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
- %1 = load i32, i32* %arrayidx2, align 4
+ store i32 %0, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx2, align 4
%add = add nsw i32 %1, %sum.013
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, 1024
@@ -312,12 +312,12 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: store i32 %0, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %2, i32* %arrayidx2, align 4
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %2, ptr %arrayidx2, align 4
-define void @vectorizable_Write_Write(i32* nocapture %A) {
+define void @vectorizable_Write_Write(ptr nocapture %A) {
entry:
- %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
+ %add.ptr = getelementptr inbounds i32, ptr %A, i64 4
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -325,13 +325,13 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
- store i32 %0, i32* %arrayidx, align 4
+ store i32 %0, ptr %arrayidx, align 4
%1 = or i64 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
%2 = trunc i64 %1 to i32
- store i32 %2, i32* %arrayidx2, align 4
+ store i32 %2, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -352,14 +352,12 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Backward loop carried data dependence that prevents store-to-load forwarding.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: BackwardVectorizableButPreventsForwarding:
-; CHECK-NEXT: %2 = load i32, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, i32* %arrayidx2, align 4
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
-define void @vectorizable_unscaled_Read_Write(i32* nocapture %A) {
+define void @vectorizable_unscaled_Read_Write(ptr nocapture %A) {
entry:
- %0 = bitcast i32* %A to i8*
- %add.ptr = getelementptr inbounds i8, i8* %0, i64 14
- %1 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %A, i64 14
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -367,11 +365,11 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %2 = load i32, i32* %arrayidx, align 4
- %add = add nsw i32 %2, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
- store i32 %add, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %add = add nsw i32 %0, 1
+ %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -393,14 +391,12 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: store i32 %2, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: %3 = load i32, i32* %arrayidx2, align 4
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
-define i32 @vectorizable_unscaled_Write_Read(i32* nocapture %A) {
+define i32 @vectorizable_unscaled_Write_Read(ptr nocapture %A) {
entry:
- %0 = bitcast i32* %A to i8*
- %add.ptr = getelementptr inbounds i8, i8* %0, i64 17
- %1 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %A, i64 17
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -409,12 +405,12 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.013 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %2 = trunc i64 %indvars.iv to i32
- store i32 %2, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
- %3 = load i32, i32* %arrayidx2, align 4
- %add = add nsw i32 %3, %sum.013
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
+ %1 = load i32, ptr %arrayidx2, align 4
+ %add = add nsw i32 %1, %sum.013
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -432,14 +428,12 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %2 = load i32, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, i32* %arrayidx2, align 4
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
-define void @unsafe_unscaled_Read_Write(i32* nocapture %A) {
+define void @unsafe_unscaled_Read_Write(ptr nocapture %A) {
entry:
- %0 = bitcast i32* %A to i8*
- %add.ptr = getelementptr inbounds i8, i8* %0, i64 11
- %1 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %A, i64 11
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -447,11 +441,11 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %2 = load i32, i32* %arrayidx, align 4
- %add = add nsw i32 %2, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
- store i32 %add, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %add = add nsw i32 %0, 1
+ %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -463,8 +457,8 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %2 = load i32, i32* %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, i32* %arrayidx2, align 4
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
; void unsafe_unscaled_Read_Write2(int *A) {
; int *B = (int *)((char *)A + 1);
@@ -472,11 +466,9 @@ for.body: ; preds = %entry, %for.body
; B[i] = A[i] + 1;
; }
-define void @unsafe_unscaled_Read_Write2(i32* nocapture %A) {
+define void @unsafe_unscaled_Read_Write2(ptr nocapture %A) {
entry:
- %0 = bitcast i32* %A to i8*
- %add.ptr = getelementptr inbounds i8, i8* %0, i64 1
- %1 = bitcast i8* %add.ptr to i32*
+ %add.ptr = getelementptr inbounds i8, ptr %A, i64 1
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -484,11 +476,11 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %2 = load i32, i32* %arrayidx, align 4
- %add = add nsw i32 %2, 1
- %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
- store i32 %add, i32* %arrayidx2, align 4
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %add = add nsw i32 %0, 1
+ %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
+ store i32 %add, ptr %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp ult i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
@@ -514,17 +506,15 @@ for.body: ; preds = %entry, %for.body
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: store i32 %4, i32* %arrayidx5, align 4 ->
-; CHECK-NEXT: store i32 %4, i32* %arrayidx9, align 4
+; CHECK-NEXT: store i32 %2, ptr %arrayidx5, align 4 ->
+; CHECK-NEXT: store i32 %2, ptr %arrayidx9, align 4
; CHECK: Backward:
-; CHECK-NEXT: store i32 %2, i32* %arrayidx2, align 4 ->
-; CHECK-NEXT: store i32 %4, i32* %arrayidx5, align 4
+; CHECK-NEXT: store i32 %0, ptr %arrayidx2, align 4 ->
+; CHECK-NEXT: store i32 %2, ptr %arrayidx5, align 4
-define void @interleaved_stores(i32* nocapture %A) {
+define void @interleaved_stores(ptr nocapture %A) {
entry:
- %0 = bitcast i32* %A to i8*
- %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
- %1 = bitcast i8* %incdec.ptr to i32*
+ %incdec.ptr = getelementptr inbounds i8, ptr %A, i64 1
br label %for.body
for.cond.cleanup: ; preds = %for.body
@@ -532,15 +522,15 @@ for.cond.cleanup: ; preds = %for.body
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %2 = trunc i64 %indvars.iv to i32
- %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
- store i32 %2, i32* %arrayidx2, align 4
- %3 = or i64 %indvars.iv, 1
- %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %3
- %4 = trunc i64 %3 to i32
- store i32 %4, i32* %arrayidx5, align 4
- %arrayidx9 = getelementptr inbounds i32, i32* %1, i64 %3
- store i32 %4, i32* %arrayidx9, align 4
+ %0 = trunc i64 %indvars.iv to i32
+ %arrayidx2 = getelementptr inbounds i32, ptr %incdec.ptr, i64 %indvars.iv
+ store i32 %0, ptr %arrayidx2, align 4
+ %1 = or i64 %indvars.iv, 1
+ %arrayidx5 = getelementptr inbounds i32, ptr %A, i64 %1
+ %2 = trunc i64 %1 to i32
+ store i32 %2, ptr %arrayidx5, align 4
+ %arrayidx9 = getelementptr inbounds i32, ptr %incdec.ptr, i64 %1
+ store i32 %2, ptr %arrayidx9, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
%cmp = icmp slt i64 %indvars.iv.next, 1024
br i1 %cmp, label %for.body, label %for.cond.cleanup
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll b/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
index 584dd84bf6f8..016c574b3b7c 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
@@ -4,15 +4,15 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; A forwarding in the presence of symbolic strides.
-define void @single_stride(i32* noalias %A, i32* noalias %B, i64 %N, i64 %stride) {
+define void @single_stride(ptr noalias %A, ptr noalias %B, i64 %N, i64 %stride) {
; CHECK-LABEL: Loop access info in function 'single_stride':
; CHECK-NEXT: loop:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %load = load i32, i32* %gep.A, align 4 ->
-; CHECK-NEXT: store i32 %add, i32* %gep.A.next, align 4
+; CHECK-NEXT: %load = load i32, ptr %gep.A, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %gep.A.next, align 4
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
@@ -22,7 +22,7 @@ define void @single_stride(i32* noalias %A, i32* noalias %B, i64 %N, i64 %stride
; CHECK-NEXT: Equal predicate: %stride == 1
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
-; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds i32, i32* %A, i64 %mul:
+; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds i32, ptr %A, i64 %mul:
; CHECK-NEXT: {%A,+,(4 * %stride)}<%loop>
; CHECK-NEXT: --> {%A,+,4}<%loop>
;
@@ -32,14 +32,14 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%mul = mul i64 %iv, %stride
- %gep.A = getelementptr inbounds i32, i32* %A, i64 %mul
- %load = load i32, i32* %gep.A, align 4
- %gep.B = getelementptr inbounds i32, i32* %B, i64 %iv
- %load_1 = load i32, i32* %gep.B, align 4
+ %gep.A = getelementptr inbounds i32, ptr %A, i64 %mul
+ %load = load i32, ptr %gep.A, align 4
+ %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+ %load_1 = load i32, ptr %gep.B, align 4
%add = add i32 %load_1, %load
%iv.next = add nuw nsw i64 %iv, 1
- %gep.A.next = getelementptr inbounds i32, i32* %A, i64 %iv.next
- store i32 %add, i32* %gep.A.next, align 4
+ %gep.A.next = getelementptr inbounds i32, ptr %A, i64 %iv.next
+ store i32 %add, ptr %gep.A.next, align 4
%exitcond = icmp eq i64 %iv.next, %N
br i1 %exitcond, label %exit, label %loop
@@ -48,15 +48,15 @@ exit: ; preds = %loop
}
; Similar to @single_stride, but with struct types.
-define void @single_stride_struct({ i32, i8 }* noalias %A, { i32, i8 }* noalias %B, i64 %N, i64 %stride) {
+define void @single_stride_struct(ptr noalias %A, ptr noalias %B, i64 %N, i64 %stride) {
; CHECK-LABEL: Loop access info in function 'single_stride_struct':
; CHECK-NEXT: loop:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %load = load { i32, i8 }, { i32, i8 }* %gep.A, align 4 ->
-; CHECK-NEXT: store { i32, i8 } %ins, { i32, i8 }* %gep.A.next, align 4
+; CHECK-NEXT: %load = load { i32, i8 }, ptr %gep.A, align 4 ->
+; CHECK-NEXT: store { i32, i8 } %ins, ptr %gep.A.next, align 4
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
@@ -66,7 +66,7 @@ define void @single_stride_struct({ i32, i8 }* noalias %A, { i32, i8 }* noalias
; CHECK-NEXT: Equal predicate: %stride == 1
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
-; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds { i32, i8 }, { i32, i8 }* %A, i64 %mul:
+; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds { i32, i8 }, ptr %A, i64 %mul:
; CHECK-NEXT: {%A,+,(8 * %stride)}<%loop>
; CHECK-NEXT: --> {%A,+,8}<%loop>
;
@@ -76,17 +76,17 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%mul = mul i64 %iv, %stride
- %gep.A = getelementptr inbounds { i32, i8 }, { i32, i8 }* %A, i64 %mul
- %load = load { i32, i8 }, { i32, i8 }* %gep.A, align 4
- %gep.B = getelementptr inbounds { i32, i8 }, { i32, i8 }* %B, i64 %iv
- %load_1 = load { i32, i8 }, { i32, i8 }* %gep.B, align 4
+ %gep.A = getelementptr inbounds { i32, i8 }, ptr %A, i64 %mul
+ %load = load { i32, i8 }, ptr %gep.A, align 4
+ %gep.B = getelementptr inbounds { i32, i8 }, ptr %B, i64 %iv
+ %load_1 = load { i32, i8 }, ptr %gep.B, align 4
%v1 = extractvalue { i32, i8 } %load, 0
%v2 = extractvalue { i32, i8} %load_1, 0
%add = add i32 %v1, %v2
%ins = insertvalue { i32, i8 } undef, i32 %add, 0
%iv.next = add nuw nsw i64 %iv, 1
- %gep.A.next = getelementptr inbounds { i32, i8 }, { i32, i8 }* %A, i64 %iv.next
- store { i32, i8 } %ins, { i32, i8 }* %gep.A.next, align 4
+ %gep.A.next = getelementptr inbounds { i32, i8 }, ptr %A, i64 %iv.next
+ store { i32, i8 } %ins, ptr %gep.A.next, align 4
%exitcond = icmp eq i64 %iv.next, %N
br i1 %exitcond, label %exit, label %loop
@@ -95,15 +95,15 @@ exit:
}
; A loop with two symbolic strides.
-define void @two_strides(i32* noalias %A, i32* noalias %B, i64 %N, i64 %stride.1, i64 %stride.2) {
+define void @two_strides(ptr noalias %A, ptr noalias %B, i64 %N, i64 %stride.1, i64 %stride.2) {
; CHECK-LABEL: Loop access info in function 'two_strides':
; CHECK-NEXT: loop:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %load = load i32, i32* %gep.A, align 4 ->
-; CHECK-NEXT: store i32 %add, i32* %gep.A.next, align 4
+; CHECK-NEXT: %load = load i32, ptr %gep.A, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %gep.A.next, align 4
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
@@ -114,10 +114,10 @@ define void @two_strides(i32* noalias %A, i32* noalias %B, i64 %N, i64 %stride.1
; CHECK-NEXT: Equal predicate: %stride.1 == 1
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
-; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds i32, i32* %A, i64 %mul:
+; CHECK-NEXT: [PSE] %gep.A = getelementptr inbounds i32, ptr %A, i64 %mul:
; CHECK-NEXT: {%A,+,(4 * %stride.1)}<%loop>
; CHECK-NEXT: --> {%A,+,4}<%loop>
-; CHECK-NEXT: [PSE] %gep.A.next = getelementptr inbounds i32, i32* %A, i64 %mul.2:
+; CHECK-NEXT: [PSE] %gep.A.next = getelementptr inbounds i32, ptr %A, i64 %mul.2:
; CHECK-NEXT: {((4 * %stride.2) + %A),+,(4 * %stride.2)}<%loop>
; CHECK-NEXT: --> {(4 + %A),+,4}<%loop>
;
@@ -127,15 +127,15 @@ entry:
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%mul = mul i64 %iv, %stride.1
- %gep.A = getelementptr inbounds i32, i32* %A, i64 %mul
- %load = load i32, i32* %gep.A, align 4
- %gep.B = getelementptr inbounds i32, i32* %B, i64 %iv
- %load_1 = load i32, i32* %gep.B, align 4
+ %gep.A = getelementptr inbounds i32, ptr %A, i64 %mul
+ %load = load i32, ptr %gep.A, align 4
+ %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+ %load_1 = load i32, ptr %gep.B, align 4
%add = add i32 %load_1, %load
%iv.next = add nuw nsw i64 %iv, 1
%mul.2 = mul i64 %iv.next, %stride.2
- %gep.A.next = getelementptr inbounds i32, i32* %A, i64 %mul.2
- store i32 %add, i32* %gep.A.next, align 4
+ %gep.A.next = getelementptr inbounds i32, ptr %A, i64 %mul.2
+ store i32 %add, ptr %gep.A.next, align 4
%exitcond = icmp eq i64 %iv.next, %N
br i1 %exitcond, label %exit, label %loop
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/uncomputable-backedge-taken-count.ll b/llvm/test/Analysis/LoopAccessAnalysis/uncomputable-backedge-taken-count.ll
index a16090880e06..0ea4afbe3e29 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/uncomputable-backedge-taken-count.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/uncomputable-backedge-taken-count.ll
@@ -9,8 +9,8 @@ target triple = "x86_64-apple-macosx10.10.0"
; No memory checks are required, because base pointers do not alias and we have
; a forward dependence for %a.
-define void @safe_forward_dependence(i16* noalias %a,
- i16* noalias %b) {
+define void @safe_forward_dependence(ptr noalias %a,
+ ptr noalias %b) {
; CHECK-LABEL: safe_forward_dependence
; CHECK: for.body:
; CHECK-NEXT: Report: could not determine number of loop iterations
@@ -23,17 +23,17 @@ for.body: ; preds = %for.body, %entry
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %iv.next
- %loadA_plus_2 = load i16, i16* %arrayidxA_plus_2, align 2
+ %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %iv.next
+ %loadA_plus_2 = load i16, ptr %arrayidxA_plus_2, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %iv
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %iv
+ %loadB = load i16, ptr %arrayidxB, align 2
%mul = mul i16 %loadB, %loadA_plus_2
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %iv
- store i16 %mul, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %iv
+ store i16 %mul, ptr %arrayidxA, align 2
%exitcond = icmp eq i16 %loadB, 20
br i1 %exitcond, label %for.end, label %for.body
@@ -45,8 +45,8 @@ for.end: ; preds = %for.body
-define void @unsafe_backwards_dependence(i16* noalias %a,
- i16* noalias %b) {
+define void @unsafe_backwards_dependence(ptr noalias %a,
+ ptr noalias %b) {
; CHECK-LABEL: unsafe_backwards_dependence
; CHECK: for.body:
; CHECK-NEXT: Report: could not determine number of loop iterations
@@ -60,17 +60,17 @@ for.body: ; preds = %for.body, %entry
%idx = add nuw nsw i64 %iv, -1
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %idx
- %loadA_plus_2 = load i16, i16* %arrayidxA_plus_2, align 2
+ %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %idx
+ %loadA_plus_2 = load i16, ptr %arrayidxA_plus_2, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %iv
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %iv
+ %loadB = load i16, ptr %arrayidxB, align 2
%mul = mul i16 %loadB, %loadA_plus_2
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %iv
- store i16 %mul, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %iv
+ store i16 %mul, ptr %arrayidxA, align 2
%exitcond = icmp eq i16 %loadB, 20
br i1 %exitcond, label %for.end, label %for.body
@@ -80,7 +80,7 @@ for.end: ; preds = %for.body
}
-define void @ptr_may_alias(i16* %a, i16* %b) {
+define void @ptr_may_alias(ptr %a, ptr %b) {
; CHECK-LABEL: ptr_may_alias
; CHECK: for.body:
; CHECK-NEXT: Report: could not determine number of loop iterations
@@ -94,15 +94,15 @@ for.body: ; preds = %for.body, %entry
%idx = add nuw nsw i64 %iv, -1
%iv.next = add nuw nsw i64 %iv, 1
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %iv
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %iv
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %iv
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %iv
+ %loadB = load i16, ptr %arrayidxB, align 2
%mul = mul i16 %loadB, %loadA
- store i16 %mul, i16* %arrayidxA, align 2
+ store i16 %mul, ptr %arrayidxA, align 2
%exitcond = icmp eq i16 %loadB, 20
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-1.ll b/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-1.ll
index bc8c59d2f979..4f4ba308ee67 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-1.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-1.ll
@@ -5,7 +5,7 @@
; store_ptr = A;
; load_ptr = &A[2];
; for (i = 0; i < n; i++)
-; *store_ptr++ = *load_ptr++ *10; // A[i] = A[i+2] * 10
+; *store_ptr++ = *load_ptr++ *10; // A[i] = A[i+2] * 10
;
; make sure, we look through the PHI to conclude that store_ptr and load_ptr
; both have A as their underlying object. The dependence is safe for
@@ -19,24 +19,24 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK: Memory dependences are safe{{$}}
-define void @f(i8* noalias %A, i64 %width) {
+define void @f(ptr noalias %A, i64 %width) {
for.body.preheader:
- %A_ahead = getelementptr inbounds i8, i8* %A, i64 2
+ %A_ahead = getelementptr inbounds i8, ptr %A, i64 2
br label %for.body
for.body:
%i = phi i64 [ %i.1, %for.body ], [ 0, %for.body.preheader ]
- %load_ptr = phi i8* [ %load_ptr.1, %for.body ], [ %A_ahead, %for.body.preheader ]
- %store_ptr = phi i8* [ %store_ptr.1, %for.body ], [ %A, %for.body.preheader ]
+ %load_ptr = phi ptr [ %load_ptr.1, %for.body ], [ %A_ahead, %for.body.preheader ]
+ %store_ptr = phi ptr [ %store_ptr.1, %for.body ], [ %A, %for.body.preheader ]
- %loadA = load i8, i8* %load_ptr, align 1
+ %loadA = load i8, ptr %load_ptr, align 1
%mul = mul i8 %loadA, 10
- store i8 %mul, i8* %store_ptr, align 1
+ store i8 %mul, ptr %store_ptr, align 1
- %load_ptr.1 = getelementptr inbounds i8, i8* %load_ptr, i64 1
- %store_ptr.1 = getelementptr inbounds i8, i8* %store_ptr, i64 1
+ %load_ptr.1 = getelementptr inbounds i8, ptr %load_ptr, i64 1
+ %store_ptr.1 = getelementptr inbounds i8, ptr %store_ptr, i64 1
%i.1 = add nuw i64 %i, 1
%exitcond = icmp eq i64 %i.1, %width
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll b/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll
index 0a0c4ac97a2c..f72a2a9c8414 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/underlying-objects-2.ll
@@ -42,19 +42,19 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %loadB = load i8, i8* %gepB, align 1 ->
-; CHECK-NEXT: store i8 2, i8* %gepB_plus_one, align 1
+; CHECK-NEXT: %loadB = load i8, ptr %gepB, align 1 ->
+; CHECK-NEXT: store i8 2, ptr %gepB_plus_one, align 1
-define void @f(i8** noalias %A, i8* noalias %B, i64 %N) {
+define void @f(ptr noalias %A, ptr noalias %B, i64 %N) {
for_i.preheader:
- %prev_0 = load i8*, i8** %A, align 8
+ %prev_0 = load ptr, ptr %A, align 8
br label %for_i.body
for_i.body:
%i = phi i64 [1, %for_i.preheader], [%i.1, %for_j.end]
- %prev = phi i8* [%prev_0, %for_i.preheader], [%curr, %for_j.end]
- %gep = getelementptr inbounds i8*, i8** %A, i64 %i
- %curr = load i8*, i8** %gep, align 8
+ %prev = phi ptr [%prev_0, %for_i.preheader], [%curr, %for_j.end]
+ %gep = getelementptr inbounds ptr, ptr %A, i64 %i
+ %curr = load ptr, ptr %gep, align 8
br label %for_j.preheader
for_j.preheader:
@@ -63,19 +63,19 @@ for_j.preheader:
for_j.body:
%j = phi i64 [0, %for_j.preheader], [%j.1, %for_j.body]
- %gepPrev = getelementptr inbounds i8, i8* %prev, i64 %j
- %gepCurr = getelementptr inbounds i8, i8* %curr, i64 %j
- %gepB = getelementptr inbounds i8, i8* %B, i64 %j
+ %gepPrev = getelementptr inbounds i8, ptr %prev, i64 %j
+ %gepCurr = getelementptr inbounds i8, ptr %curr, i64 %j
+ %gepB = getelementptr inbounds i8, ptr %B, i64 %j
- %loadPrev = load i8, i8* %gepPrev, align 1
- %loadB = load i8, i8* %gepB, align 1
+ %loadPrev = load i8, ptr %gepPrev, align 1
+ %loadB = load i8, ptr %gepB, align 1
%mul = mul i8 %loadPrev, %loadB
- store i8 %mul, i8* %gepCurr, align 1
+ store i8 %mul, ptr %gepCurr, align 1
- %gepB_plus_one = getelementptr inbounds i8, i8* %gepB, i64 1
- store i8 2, i8* %gepB_plus_one, align 1
+ %gepB_plus_one = getelementptr inbounds i8, ptr %gepB, i64 1
+ store i8 2, ptr %gepB_plus_one, align 1
%j.1 = add nuw i64 %j, 1
%exitcondj = icmp eq i64 %j.1, %N
@@ -97,16 +97,16 @@ for_i.end:
; CHECK-NEXT: Memory dependences are safe with run-time checks
; CHECK-NEXT: Dependences:
-define void @f_deep(i8** noalias %A, i8* noalias %B, i64 %N) {
+define void @f_deep(ptr noalias %A, ptr noalias %B, i64 %N) {
for_i.preheader:
- %prev_0 = load i8*, i8** %A, align 8
+ %prev_0 = load ptr, ptr %A, align 8
br label %for_i.body
for_i.body:
%i = phi i64 [1, %for_i.preheader], [%i.1, %for_j.end]
- %prev = phi i8* [%prev_0, %for_i.preheader], [%curr, %for_j.end]
- %gep = getelementptr inbounds i8*, i8** %A, i64 %i
- %curr = load i8*, i8** %gep, align 8
+ %prev = phi ptr [%prev_0, %for_i.preheader], [%curr, %for_j.end]
+ %gep = getelementptr inbounds ptr, ptr %A, i64 %i
+ %curr = load ptr, ptr %gep, align 8
br label %for_j.preheader
for_j.preheader:
@@ -115,10 +115,10 @@ for_j.preheader:
for_j.body:
%j = phi i64 [0, %for_j.preheader], [%j.1, %for_j.body]
- %gepPrev = getelementptr inbounds i8, i8* %prev, i64 %j
- %gepCurr = getelementptr inbounds i8, i8* %curr, i64 %j
- %gepB = getelementptr inbounds i8, i8* %B, i64 %j
- %gepB1 = getelementptr inbounds i8, i8* %gepB, i64 %j
+ %gepPrev = getelementptr inbounds i8, ptr %prev, i64 %j
+ %gepCurr = getelementptr inbounds i8, ptr %curr, i64 %j
+ %gepB = getelementptr inbounds i8, ptr %B, i64 %j
+ %gepB1 = getelementptr inbounds i8, ptr %gepB, i64 %j
- %gepB2 = getelementptr inbounds i8, i8* %gepB1, i64 0
- %gepB3 = getelementptr inbounds i8, i8* %gepB2, i64 0
- %gepB4 = getelementptr inbounds i8, i8* %gepB3, i64 0
+ %gepB2 = getelementptr inbounds i8, ptr %gepB1, i64 0
+ %gepB3 = getelementptr inbounds i8, ptr %gepB2, i64 0
+ %gepB4 = getelementptr inbounds i8, ptr %gepB3, i64 0
@@ -128,15 +128,15 @@ for_j.body:
- %gepB8 = getelementptr inbounds i8, i8* %gepB7, i64 0
- %gepB9 = getelementptr inbounds i8, i8* %gepB8, i64 0
+ %gepB8 = getelementptr inbounds i8, ptr %gepB7, i64 0
+ %gepB9 = getelementptr inbounds i8, ptr %gepB8, i64 0
- %loadPrev = load i8, i8* %gepPrev, align 1
- %loadB = load i8, i8* %gepB9, align 1
+ %loadPrev = load i8, ptr %gepPrev, align 1
+ %loadB = load i8, ptr %gepB9, align 1
%mul = mul i8 %loadPrev, %loadB
- store i8 %mul, i8* %gepCurr, align 1
+ store i8 %mul, ptr %gepCurr, align 1
- %gepB_plus_one = getelementptr inbounds i8, i8* %gepB, i64 1
- store i8 2, i8* %gepB_plus_one, align 1
+ %gepB_plus_one = getelementptr inbounds i8, ptr %gepB, i64 1
+ store i8 2, ptr %gepB_plus_one, align 1
%j.1 = add nuw i64 %j, 1
%exitcondj = icmp eq i64 %j.1, %N
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-convergent.ll b/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-convergent.ll
index a27867024f8a..4876c5c4b26f 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-convergent.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-convergent.ll
@@ -11,44 +11,44 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: Report: cannot add control dependency to convergent operation
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %loadA = load i16, i16* %arrayidxA, align 2 ->
-; CHECK-NEXT: store i16 %mul1, i16* %arrayidxA_plus_2, align 2
+; CHECK-NEXT: %loadA = load i16, ptr %arrayidxA, align 2 ->
+; CHECK-NEXT: store i16 %mul1, ptr %arrayidxA_plus_2, align 2
; CHECK: Run-time memory checks:
; CHECK-NEXT: 0:
; CHECK-NEXT: Comparing group
-; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %add
; CHECK-NEXT: Against group
-; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
+; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %storemerge3
; CHECK-NEXT: 1:
; CHECK-NEXT: Comparing group
-; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %add
; CHECK-NEXT: Against group
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %storemerge3
-@B = common global i16* null, align 8
-@A = common global i16* null, align 8
-@C = common global i16* null, align 8
+@B = common global ptr null, align 8
+@A = common global ptr null, align 8
+@C = common global ptr null, align 8
define void @f() #1 {
entry:
- %a = load i16*, i16** @A, align 8
- %b = load i16*, i16** @B, align 8
- %c = load i16*, i16** @C, align 8
+ %a = load ptr, ptr @A, align 8
+ %b = load ptr, ptr @B, align 8
+ %c = load ptr, ptr @C, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %storemerge3
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %storemerge3
+ %loadB = load i16, ptr %arrayidxB, align 2
- %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
- %loadC = load i16, i16* %arrayidxC, align 2
+ %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %storemerge3
+ %loadC = load i16, ptr %arrayidxC, align 2
call void @llvm.convergent()
@@ -56,8 +56,8 @@ for.body: ; preds = %for.body, %entry
%mul1 = mul i16 %mul, %loadC
%add = add nuw nsw i64 %storemerge3, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
- store i16 %mul1, i16* %arrayidxA_plus_2, align 2
+ %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %add
+ store i16 %mul1, ptr %arrayidxA_plus_2, align 2
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
index 460809c203dc..f7a6f5f5edd2 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
@@ -11,51 +11,51 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK-NEXT: Backward loop carried data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Backward:
-; CHECK-NEXT: %loadA = load i16, i16* %arrayidxA, align 2 ->
-; CHECK-NEXT: store i16 %mul1, i16* %arrayidxA_plus_2, align 2
+; CHECK-NEXT: %loadA = load i16, ptr %arrayidxA, align 2 ->
+; CHECK-NEXT: store i16 %mul1, ptr %arrayidxA_plus_2, align 2
; CHECK: Run-time memory checks:
; CHECK-NEXT: 0:
; CHECK-NEXT: Comparing group
-; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %add
; CHECK-NEXT: Against group
-; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
+; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %storemerge3
; CHECK-NEXT: 1:
; CHECK-NEXT: Comparing group
-; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %add
; CHECK-NEXT: Against group
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %storemerge3
-@B = common global i16* null, align 8
-@A = common global i16* null, align 8
-@C = common global i16* null, align 8
+@B = common global ptr null, align 8
+@A = common global ptr null, align 8
+@C = common global ptr null, align 8
define void @f() {
entry:
- %a = load i16*, i16** @A, align 8
- %b = load i16*, i16** @B, align 8
- %c = load i16*, i16** @C, align 8
+ %a = load ptr, ptr @A, align 8
+ %b = load ptr, ptr @B, align 8
+ %c = load ptr, ptr @C, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i64 %storemerge3
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %storemerge3
+ %loadB = load i16, ptr %arrayidxB, align 2
- %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
- %loadC = load i16, i16* %arrayidxC, align 2
+ %arrayidxC = getelementptr inbounds i16, ptr %c, i64 %storemerge3
+ %loadC = load i16, ptr %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA
%mul1 = mul i16 %mul, %loadC
%add = add nuw nsw i64 %storemerge3, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
- store i16 %mul1, i16* %arrayidxA_plus_2, align 2
+ %arrayidxA_plus_2 = getelementptr inbounds i16, ptr %a, i64 %add
+ store i16 %mul1, ptr %arrayidxA_plus_2, align 2
%exitcond = icmp eq i64 %add, 20
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll b/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
index 6c8c37aa90ee..c110aa6a6965 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
@@ -30,13 +30,13 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; We have added the nusw flag to turn this expression into the SCEV expression:
; i64 {0,+,2}<%for.body>
-; LAA: [PSE] %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext:
+; LAA: [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
; LAA-NEXT: ((2 * (zext i32 {0,+,2}<%for.body> to i64))<nuw><nsw> + %a)
; LAA-NEXT: --> {%a,+,4}<%for.body>
-define void @f1(i16* noalias %a,
- i16* noalias %b, i64 %N) {
+define void @f1(ptr noalias %a,
+ ptr noalias %b, i64 %N) {
entry:
br label %for.body
@@ -47,15 +47,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
%mul_ext = zext i32 %mul to i64
- %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%inc1 = add i32 %ind1, 1
@@ -97,12 +97,12 @@ for.end: ; preds = %for.body
; We have added the nusw flag to turn this expression into the following SCEV:
; i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>
-; LAA: [PSE] %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext:
+; LAA: [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
; LAA-NEXT: ((2 * (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nuw><nsw> + %a)
; LAA-NEXT: --> {((4 * (zext i31 (trunc i64 %N to i31) to i64))<nuw><nsw> + %a),+,-4}<%for.body>
-define void @f2(i16* noalias %a,
- i16* noalias %b, i64 %N) {
+define void @f2(ptr noalias %a,
+ ptr noalias %b, i64 %N) {
entry:
%TruncN = trunc i64 %N to i32
br label %for.body
@@ -114,15 +114,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
%mul_ext = zext i32 %mul to i64
- %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%dec = sub i32 %ind1, 1
@@ -148,12 +148,12 @@ for.end: ; preds = %for.body
; We have added the nssw flag to turn this expression into the following SCEV:
; i64 {0,+,2}<%for.body>
-; LAA: [PSE] %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext:
+; LAA: [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
; LAA-NEXT: ((2 * (sext i32 {0,+,2}<%for.body> to i64))<nsw> + %a)
; LAA-NEXT: --> {%a,+,4}<%for.body>
-define void @f3(i16* noalias %a,
- i16* noalias %b, i64 %N) {
+define void @f3(ptr noalias %a,
+ ptr noalias %b, i64 %N) {
entry:
br label %for.body
@@ -164,15 +164,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
%mul_ext = sext i32 %mul to i64
- %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%inc1 = add i32 %ind1, 1
@@ -195,12 +195,12 @@ for.end: ; preds = %for.body
; We have added the nssw flag to turn this expression into the following SCEV:
; i64 {sext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>
-; LAA: [PSE] %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext:
+; LAA: [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
; LAA-NEXT: ((2 * (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nsw> + %a)
; LAA-NEXT: --> {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body>
-define void @f4(i16* noalias %a,
- i16* noalias %b, i64 %N) {
+define void @f4(ptr noalias %a,
+ ptr noalias %b, i64 %N) {
entry:
%TruncN = trunc i64 %N to i32
br label %for.body
@@ -212,15 +212,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
%mul_ext = sext i32 %mul to i64
- %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%dec = sub i32 %ind1, 1
@@ -245,12 +245,12 @@ for.end: ; preds = %for.body
; LAA-NEXT: {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nssw>
; LAA-NEXT: {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body> Added Flags: <nusw>
-; LAA: [PSE] %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul:
+; LAA: [PSE] %arrayidxA = getelementptr inbounds i16, ptr %a, i32 %mul:
; LAA-NEXT: ((2 * (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nsw> + %a)
; LAA-NEXT: --> {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body>
-define void @f5(i16* noalias %a,
- i16* noalias %b, i64 %N) {
+define void @f5(ptr noalias %a,
+ ptr noalias %b, i64 %N) {
entry:
%TruncN = trunc i64 %N to i32
br label %for.body
@@ -261,15 +261,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
- %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i32 %mul
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%dec = sub i32 %ind1, 1
More information about the llvm-commits
mailing list