[llvm] e7f4ad1 - [Transforms] Convert some tests to opaque pointers (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 11 07:49:22 PDT 2023
Author: Nikita Popov
Date: 2023-04-11T16:49:12+02:00
New Revision: e7f4ad13ae57f7a5170cd9caab3b1253d444c2f5
URL: https://github.com/llvm/llvm-project/commit/e7f4ad13ae57f7a5170cd9caab3b1253d444c2f5
DIFF: https://github.com/llvm/llvm-project/commit/e7f4ad13ae57f7a5170cd9caab3b1253d444c2f5.diff
LOG: [Transforms] Convert some tests to opaque pointers (NFC)
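For context on what the conversion does: each typed pointer such as i32* or <vscale x 2 x i64>* becomes the single opaque ptr type, so loads, stores and GEPs spell out their value type explicitly and pointee-type bitcasts disappear. A minimal before/after sketch of the pattern, taken from the load-metadata.ll hunk below:

    ; typed pointers (old)
    %PRE = load i32, i32* %p

    ; opaque pointers (new)
    %PRE = load i32, ptr %p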
Added:
Modified:
llvm/test/Transforms/GVN/PRE/load-metadata.ll
llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
llvm/test/Transforms/LoopReroll/complex_reroll.ll
llvm/test/Transforms/LoopStrengthReduce/X86/expander-crashes.ll
llvm/test/Transforms/LoopStrengthReduce/X86/nested-ptr-addrec.ll
llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
llvm/test/Transforms/LoopStrengthReduce/post-increment-insertion.ll
llvm/test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll
llvm/test/Transforms/LoopStrengthReduce/uglygep.ll
llvm/test/Transforms/LoopVersioning/lcssa.ll
llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
llvm/test/Transforms/NewGVN/pr31613.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/GVN/PRE/load-metadata.ll b/llvm/test/Transforms/GVN/PRE/load-metadata.ll
index 861f3ec159032..415812be95b3a 100644
--- a/llvm/test/Transforms/GVN/PRE/load-metadata.ll
+++ b/llvm/test/Transforms/GVN/PRE/load-metadata.ll
@@ -1,6 +1,6 @@
-; RUN: opt -opaque-pointers=0 -S -passes=gvn < %s | FileCheck %s
+; RUN: opt -S -passes=gvn < %s | FileCheck %s
-define i32 @test1(i32* %p, i1 %C) {
+define i32 @test1(ptr %p, i1 %C) {
; CHECK-LABEL: @test1(
block1:
br i1 %C, label %block2, label %block3
@@ -8,14 +8,14 @@ block1:
block2:
br label %block4
; CHECK: block2:
-; CHECK-NEXT: load i32, i32* %p, align 4, !range !0, !invariant.group !1
+; CHECK-NEXT: load i32, ptr %p, align 4, !range !0, !invariant.group !1
block3:
- store i32 0, i32* %p
+ store i32 0, ptr %p
br label %block4
block4:
- %PRE = load i32, i32* %p, !range !0, !invariant.group !1
+ %PRE = load i32, ptr %p, !range !0, !invariant.group !1
ret i32 %PRE
}
diff --git a/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll b/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
index 1def31e5f66ac..c1375cbf3493d 100644
--- a/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
+++ b/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
@@ -1,29 +1,26 @@
-; RUN: opt -opaque-pointers=0 -mtriple=aarch64--linux-gnu -mattr=+sve < %s -passes=inline -S | FileCheck %s
+; RUN: opt -mtriple=aarch64--linux-gnu -mattr=+sve < %s -passes=inline -S | FileCheck %s
-define void @bar(<vscale x 2 x i64>* %a) {
+define void @bar(ptr %a) {
entry:
%b = alloca <vscale x 2 x i64>, align 16
- store <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64>* %b, align 16
- %c = load <vscale x 2 x i64>, <vscale x 2 x i64>* %a, align 16
- %d = load <vscale x 2 x i64>, <vscale x 2 x i64>* %b, align 16
+ store <vscale x 2 x i64> zeroinitializer, ptr %b, align 16
+ %c = load <vscale x 2 x i64>, ptr %a, align 16
+ %d = load <vscale x 2 x i64>, ptr %b, align 16
%e = add <vscale x 2 x i64> %c, %d
%f = add <vscale x 2 x i64> %e, %c
- store <vscale x 2 x i64> %f, <vscale x 2 x i64>* %a, align 16
+ store <vscale x 2 x i64> %f, ptr %a, align 16
ret void
}
define i64 @foo() {
; CHECK-LABEL: @foo(
-; CHECK: %0 = bitcast <vscale x 2 x i64>* %{{.*}} to i8*
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0)
-; CHECK: %1 = bitcast <vscale x 2 x i64>* %{{.*}} to i8*
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* %1)
+; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %{{.*}})
+; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %{{.*}})
entry:
%a = alloca <vscale x 2 x i64>, align 16
- store <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64>* %a, align 16
- %a1 = bitcast <vscale x 2 x i64>* %a to i64*
- store i64 1, i64* %a1, align 8
- call void @bar(<vscale x 2 x i64>* %a)
- %el = load i64, i64* %a1
+ store <vscale x 2 x i64> zeroinitializer, ptr %a, align 16
+ store i64 1, ptr %a, align 8
+ call void @bar(ptr %a)
+ %el = load i64, ptr %a
ret i64 %el
}
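The lifetime-marker checks above change shape for the same reason: llvm.lifetime.start/end are now mangled only on the pointer's address space (p0 rather than p0i8), so the i8* bitcast that used to feed them is gone. A minimal sketch of the two forms, following the CHECK lines in this test:

    ; typed pointers (old)
    %0 = bitcast <vscale x 2 x i64>* %a to i8*
    call void @llvm.lifetime.start.p0i8(i64 -1, i8* %0)

    ; opaque pointers (new)
    call void @llvm.lifetime.start.p0(i64 -1, ptr %a)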
diff --git a/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll b/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
index 7330fb97087f3..fa3448e28041a 100644
--- a/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
+++ b/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
@@ -1,28 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 -passes=loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | FileCheck %s
+; RUN: opt -passes=loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; PredicatedScalarEvolution decides it needs to insert a bounds check
; not based on memory access.
-define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) {
+define void @f(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d, ptr noalias %e, i64 %N) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A5:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]]
; CHECK: for.body.lver.check:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; CHECK-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
-; CHECK-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
-; CHECK-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
-; CHECK-NEXT: [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
-; CHECK-NEXT: [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
-; CHECK-NEXT: [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
-; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
-; CHECK-NEXT: br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
+; CHECK-NEXT: [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 0, [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[A]]
+; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
+; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP1]], [[TMP5]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
; CHECK: for.body.ph.lver.orig:
; CHECK-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; CHECK: for.body.lver.orig:
@@ -30,22 +29,22 @@ define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %
; CHECK-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; CHECK-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
-; CHECK-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
-; CHECK-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXA_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXB_LVER_ORIG]], align 4
; CHECK-NEXT: [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; CHECK-NEXT: [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; CHECK-NEXT: [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
-; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
-; CHECK-NEXT: store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
-; CHECK-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
-; CHECK-NEXT: [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: [[LOADE_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXE_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LVER_ORIG]]
+; CHECK-NEXT: store i32 [[MULA_LVER_ORIG]], ptr [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXD_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: [[LOADE_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXE_LVER_ORIG]], align 4
; CHECK-NEXT: [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADE_LVER_ORIG]]
-; CHECK-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: store i32 [[MULC_LVER_ORIG]], ptr [[ARRAYIDXC_LVER_ORIG]], align 4
; CHECK-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; CHECK: for.body.ph.ldist1:
@@ -55,15 +54,15 @@ define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %
; CHECK-NEXT: [[IND1_LDIST1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[INC1_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT: [[MUL_LDIST1:%.*]] = mul i32 [[IND1_LDIST1]], 2
; CHECK-NEXT: [[MUL_EXT_LDIST1:%.*]] = zext i32 [[MUL_LDIST1]] to i64
-; CHECK-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LDIST1]]
-; CHECK-NEXT: [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4, !alias.scope !0
-; CHECK-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[MUL_EXT_LDIST1]]
-; CHECK-NEXT: [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_EXT_LDIST1]]
+; CHECK-NEXT: [[LOADA_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXA_LDIST1]], align 4, !alias.scope !0
+; CHECK-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[MUL_EXT_LDIST1]]
+; CHECK-NEXT: [[LOADB_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXB_LDIST1]], align 4
; CHECK-NEXT: [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; CHECK-NEXT: [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
; CHECK-NEXT: [[INC1_LDIST1]] = add i32 [[IND1_LDIST1]], 1
-; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
-; CHECK-NEXT: store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4, !alias.scope !3
+; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LDIST1]]
+; CHECK-NEXT: store i32 [[MULA_LDIST1]], ptr [[ARRAYIDXA_PLUS_4_LDIST1]], align 4, !alias.scope !3
; CHECK-NEXT: [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; CHECK: for.body.ph:
@@ -75,18 +74,18 @@ define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %
; CHECK-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT: [[INC1]] = add i32 [[IND1]], 1
-; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[MUL_EXT]]
-; CHECK-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
-; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E]], i64 [[MUL_EXT]]
-; CHECK-NEXT: [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
+; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[MUL_EXT]]
+; CHECK-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
+; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, ptr [[E]], i64 [[MUL_EXT]]
+; CHECK-NEXT: [[LOADE:%.*]] = load i32, ptr [[ARRAYIDXE]], align 4
; CHECK-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADE]]
-; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[MUL_EXT]]
-; CHECK-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
+; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[MUL_EXT]]
+; CHECK-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT3:%.*]], label [[FOR_BODY]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: for.end.loopexit3:
+; CHECK: for.end.loopexit2:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
@@ -102,30 +101,30 @@ for.body: ; preds = %for.body, %entry
%mul_ext = zext i32 %mul to i64
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %mul_ext
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %mul_ext
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
%inc1 = add i32 %ind1, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %mul_ext
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %mul_ext
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %mul_ext
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, %N
br i1 %exitcond, label %for.end, label %for.body
@@ -137,26 +136,25 @@ for.end: ; preds = %for.body
declare void @use64(i64)
@global_a = common local_unnamed_addr global [8192 x i32] zeroinitializer, align 16
-define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) {
+define void @f_with_offset(ptr noalias %b, ptr noalias %c, ptr noalias %d, ptr noalias %e, i64 %N) {
; CHECK-LABEL: @f_with_offset(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A_BASE:%.*]] = getelementptr [8192 x i32], [8192 x i32]* @global_a, i32 0, i32 0
-; CHECK-NEXT: [[A_INTPTR:%.*]] = ptrtoint i32* [[A_BASE]] to i64
+; CHECK-NEXT: [[A_INTPTR:%.*]] = ptrtoint ptr @global_a to i64
; CHECK-NEXT: call void @use64(i64 [[A_INTPTR]])
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, i32* [[A_BASE]], i32 42
+; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr @global_a, i32 42
; CHECK-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]]
; CHECK: for.body.lver.check:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; CHECK-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
-; CHECK-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
-; CHECK-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
-; CHECK-NEXT: [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* bitcast (i32* getelementptr inbounds ([8192 x i32], [8192 x i32]* @global_a, i64 0, i64 42) to i8*), i64 [[MUL_RESULT3]]
-; CHECK-NEXT: [[TMP15:%.*]] = icmp ult i8* [[TMP12]], bitcast (i32* getelementptr inbounds ([8192 x i32], [8192 x i32]* @global_a, i64 0, i64 42) to i8*)
-; CHECK-NEXT: [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
-; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
-; CHECK-NEXT: br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
+; CHECK-NEXT: [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 0, [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[A]]
+; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
+; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP1]], [[TMP5]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
; CHECK: for.body.ph.lver.orig:
; CHECK-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; CHECK: for.body.lver.orig:
@@ -164,22 +162,22 @@ define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i3
; CHECK-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; CHECK-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
-; CHECK-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
-; CHECK-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXA_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXB_LVER_ORIG]], align 4
; CHECK-NEXT: [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; CHECK-NEXT: [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; CHECK-NEXT: [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
-; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
-; CHECK-NEXT: store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
-; CHECK-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
-; CHECK-NEXT: [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: [[LOADE_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXE_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LVER_ORIG]]
+; CHECK-NEXT: store i32 [[MULA_LVER_ORIG]], ptr [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXD_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: [[LOADE_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXE_LVER_ORIG]], align 4
; CHECK-NEXT: [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADE_LVER_ORIG]]
-; CHECK-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
-; CHECK-NEXT: store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
+; CHECK-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
+; CHECK-NEXT: store i32 [[MULC_LVER_ORIG]], ptr [[ARRAYIDXC_LVER_ORIG]], align 4
; CHECK-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; CHECK: for.body.ph.ldist1:
@@ -189,15 +187,15 @@ define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i3
; CHECK-NEXT: [[IND1_LDIST1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[INC1_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT: [[MUL_LDIST1:%.*]] = mul i32 [[IND1_LDIST1]], 2
; CHECK-NEXT: [[MUL_EXT_LDIST1:%.*]] = zext i32 [[MUL_LDIST1]] to i64
-; CHECK-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LDIST1]]
-; CHECK-NEXT: [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4, !alias.scope !5
-; CHECK-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[MUL_EXT_LDIST1]]
-; CHECK-NEXT: [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[MUL_EXT_LDIST1]]
+; CHECK-NEXT: [[LOADA_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXA_LDIST1]], align 4, !alias.scope !5
+; CHECK-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[MUL_EXT_LDIST1]]
+; CHECK-NEXT: [[LOADB_LDIST1:%.*]] = load i32, ptr [[ARRAYIDXB_LDIST1]], align 4
; CHECK-NEXT: [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; CHECK-NEXT: [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
; CHECK-NEXT: [[INC1_LDIST1]] = add i32 [[IND1_LDIST1]], 1
-; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
-; CHECK-NEXT: store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4, !alias.scope !8
+; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD_LDIST1]]
+; CHECK-NEXT: store i32 [[MULA_LDIST1]], ptr [[ARRAYIDXA_PLUS_4_LDIST1]], align 4, !alias.scope !8
; CHECK-NEXT: [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; CHECK: for.body.ph:
@@ -209,13 +207,13 @@ define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i3
; CHECK-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT: [[INC1]] = add i32 [[IND1]], 1
-; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[MUL_EXT]]
-; CHECK-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
-; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E]], i64 [[MUL_EXT]]
-; CHECK-NEXT: [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
+; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[MUL_EXT]]
+; CHECK-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
+; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, ptr [[E]], i64 [[MUL_EXT]]
+; CHECK-NEXT: [[LOADE:%.*]] = load i32, ptr [[ARRAYIDXE]], align 4
; CHECK-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADE]]
-; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[MUL_EXT]]
-; CHECK-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
+; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[MUL_EXT]]
+; CHECK-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
; CHECK: for.end.loopexit:
@@ -226,10 +224,9 @@ define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i3
; CHECK-NEXT: ret void
;
entry:
- %a_base = getelementptr [8192 x i32], [8192 x i32]* @global_a, i32 0, i32 0
- %a_intptr = ptrtoint i32* %a_base to i64
+ %a_intptr = ptrtoint ptr @global_a to i64
call void @use64(i64 %a_intptr)
- %a = getelementptr i32, i32* %a_base, i32 42
+ %a = getelementptr i32, ptr @global_a, i32 42
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -240,30 +237,30 @@ for.body: ; preds = %for.body, %entry
%mul_ext = zext i32 %mul to i64
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %mul_ext
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %mul_ext
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
%inc1 = add i32 %ind1, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %mul_ext
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %mul_ext
+ %loadE = load i32, ptr %arrayidxE, align 4
%mulC = mul i32 %loadD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %mul_ext
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, %N
br i1 %exitcond, label %for.end, label %for.body
@@ -273,7 +270,7 @@ for.end: ; preds = %for.body
}
; Can't add control dependency with convergent in loop body.
-define void @f_with_convergent(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) #1 {
+define void @f_with_convergent(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d, ptr noalias %e, i64 %N) #1 {
; CHECK-LABEL: @f_with_convergent(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
@@ -282,23 +279,23 @@ define void @f_with_convergent(i32* noalias %a, i32* noalias %b, i32* noalias %c
; CHECK-NEXT: [[IND1:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
-; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_EXT]]
-; CHECK-NEXT: [[LOADA:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4
-; CHECK-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT]]
-; CHECK-NEXT: [[LOADB:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4
+; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[MUL_EXT]]
+; CHECK-NEXT: [[LOADA:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4
+; CHECK-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[MUL_EXT]]
+; CHECK-NEXT: [[LOADB:%.*]] = load i32, ptr [[ARRAYIDXB]], align 4
; CHECK-NEXT: [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]]
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT: [[INC1]] = add i32 [[IND1]], 1
-; CHECK-NEXT: [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD]]
-; CHECK-NEXT: store i32 [[MULA]], i32* [[ARRAYIDXA_PLUS_4]], align 4
-; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT]]
-; CHECK-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
-; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT]]
-; CHECK-NEXT: [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
+; CHECK-NEXT: [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD]]
+; CHECK-NEXT: store i32 [[MULA]], ptr [[ARRAYIDXA_PLUS_4]], align 4
+; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, ptr [[D:%.*]], i64 [[MUL_EXT]]
+; CHECK-NEXT: [[LOADD:%.*]] = load i32, ptr [[ARRAYIDXD]], align 4
+; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, ptr [[E:%.*]], i64 [[MUL_EXT]]
+; CHECK-NEXT: [[LOADE:%.*]] = load i32, ptr [[ARRAYIDXE]], align 4
; CHECK-NEXT: [[CONVERGENTD:%.*]] = call i32 @llvm.convergent(i32 [[LOADD]])
; CHECK-NEXT: [[MULC:%.*]] = mul i32 [[CONVERGENTD]], [[LOADE]]
-; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT]]
-; CHECK-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
+; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[MUL_EXT]]
+; CHECK-NEXT: store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
@@ -315,31 +312,31 @@ for.body: ; preds = %for.body, %entry
%mul_ext = zext i32 %mul to i64
- %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
- %loadA = load i32, i32* %arrayidxA, align 4
+ %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %mul_ext
+ %loadA = load i32, ptr %arrayidxA, align 4
- %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
- %loadB = load i32, i32* %arrayidxB, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %mul_ext
+ %loadB = load i32, ptr %arrayidxB, align 4
%mulA = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %ind, 1
%inc1 = add i32 %ind1, 1
- %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
- store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+ %arrayidxA_plus_4 = getelementptr inbounds i32, ptr %a, i64 %add
+ store i32 %mulA, ptr %arrayidxA_plus_4, align 4
- %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
- %loadD = load i32, i32* %arrayidxD, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %d, i64 %mul_ext
+ %loadD = load i32, ptr %arrayidxD, align 4
- %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
- %loadE = load i32, i32* %arrayidxE, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %e, i64 %mul_ext
+ %loadE = load i32, ptr %arrayidxE, align 4
%convergentD = call i32 @llvm.convergent(i32 %loadD)
%mulC = mul i32 %convergentD, %loadE
- %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
- store i32 %mulC, i32* %arrayidxC, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %mul_ext
+ store i32 %mulC, ptr %arrayidxC, align 4
%exitcond = icmp eq i64 %add, %N
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/Transforms/LoopReroll/complex_reroll.ll b/llvm/test/Transforms/LoopReroll/complex_reroll.ll
index 66ce3097ebdaa..27139eeecf8ce 100644
--- a/llvm/test/Transforms/LoopReroll/complex_reroll.ll
+++ b/llvm/test/Transforms/LoopReroll/complex_reroll.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 -S -passes=loop-reroll %s | FileCheck %s
+; RUN: opt -S -passes=loop-reroll %s | FileCheck %s
declare i32 @goo(i32, i32)
-@buf = external global i8*
+@buf = external global ptr
@aaa = global [16 x i8] c"\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F\10", align 1
define i32 @test1(i32 %len) {
@@ -13,8 +13,8 @@ define i32 @test1(i32 %len) {
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[SUM44_020:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[WHILE_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[INDVAR]] to i32
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr [16 x i8], [16 x i8]* @aaa, i64 0, i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[SCEVGEP]], align 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr @aaa, i64 [[INDVAR]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[SCEVGEP]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP1]] to i64
; CHECK-NEXT: [[ADD]] = add i64 [[CONV]], [[SUM44_020]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
@@ -32,24 +32,24 @@ entry:
while.body:
%dec22 = phi i32 [ 4, %entry ], [ %dec, %while.body ]
- %buf.021 = phi i8* [ getelementptr inbounds ([16 x i8], [16 x i8]* @aaa, i64 0, i64 0), %entry ], [ %add.ptr, %while.body ]
+ %buf.021 = phi ptr [ @aaa, %entry ], [ %add.ptr, %while.body ]
%sum44.020 = phi i64 [ 0, %entry ], [ %add9, %while.body ]
- %0 = load i8, i8* %buf.021, align 1
+ %0 = load i8, ptr %buf.021, align 1
%conv = zext i8 %0 to i64
%add = add i64 %conv, %sum44.020
- %arrayidx1 = getelementptr inbounds i8, i8* %buf.021, i64 1
- %1 = load i8, i8* %arrayidx1, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %buf.021, i64 1
+ %1 = load i8, ptr %arrayidx1, align 1
%conv2 = zext i8 %1 to i64
%add3 = add i64 %add, %conv2
- %arrayidx4 = getelementptr inbounds i8, i8* %buf.021, i64 2
- %2 = load i8, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %buf.021, i64 2
+ %2 = load i8, ptr %arrayidx4, align 1
%conv5 = zext i8 %2 to i64
%add6 = add i64 %add3, %conv5
- %arrayidx7 = getelementptr inbounds i8, i8* %buf.021, i64 3
- %3 = load i8, i8* %arrayidx7, align 1
+ %arrayidx7 = getelementptr inbounds i8, ptr %buf.021, i64 3
+ %3 = load i8, ptr %arrayidx7, align 1
%conv8 = zext i8 %3 to i64
%add9 = add i64 %add6, %conv8
- %add.ptr = getelementptr inbounds i8, i8* %buf.021, i64 4
+ %add.ptr = getelementptr inbounds i8, ptr %buf.021, i64 4
%dec = add nsw i32 %dec22, -1
%tobool = icmp eq i32 %dec, 0
br i1 %tobool, label %while.end, label %while.body
@@ -60,7 +60,7 @@ while.end: ; preds = %while.body
unreachable
}
-define i32 @test2(i32 %N, i32* nocapture readonly %a, i32 %S) {
+define i32 @test2(i32 %N, ptr nocapture readonly %a, i32 %S) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP_9:%.*]] = icmp sgt i32 [[N:%.*]], 0
@@ -81,9 +81,10 @@ define i32 @test2(i32 %N, i32* nocapture readonly %a, i32 %S) {
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: [[S_ADDR_011:%.*]] = phi i32 [ [[S]], [[FOR_BODY_LR_PH]] ], [ [[ADD]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[INDVAR]] to i32
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[SCEVGEP]], align 4
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP5]], [[S_ADDR_011]]
+; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[INDVAR]], 2
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SCEVGEP]], align 4
+; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP6]], [[S_ADDR_011]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[TMP4]], [[TMP3]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE]], label [[FOR_BODY]]
@@ -106,19 +107,19 @@ for.body:
%i.012 = phi i32 [ 0, %for.body.lr.ph ], [ %add3, %for.body ]
%S.addr.011 = phi i32 [ %S, %for.body.lr.ph ], [ %add2, %for.body ]
- %a.addr.010 = phi i32* [ %a, %for.body.lr.ph ], [ %incdec.ptr1, %for.body ]
- %incdec.ptr = getelementptr inbounds i32, i32* %a.addr.010, i64 1
- %0 = load i32, i32* %a.addr.010, align 4
+ %a.addr.010 = phi ptr [ %a, %for.body.lr.ph ], [ %incdec.ptr1, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, ptr %a.addr.010, i64 1
+ %0 = load i32, ptr %a.addr.010, align 4
%add = add nsw i32 %0, %S.addr.011
- %incdec.ptr1 = getelementptr inbounds i32, i32* %a.addr.010, i64 2
- %1 = load i32, i32* %incdec.ptr, align 4
+ %incdec.ptr1 = getelementptr inbounds i32, ptr %a.addr.010, i64 2
+ %1 = load i32, ptr %incdec.ptr, align 4
%add2 = add nsw i32 %add, %1
%add3 = add nsw i32 %i.012, 2
%cmp = icmp slt i32 %add3, %N
br i1 %cmp, label %for.body, label %for.cond.for.cond.cleanup_crit_edge
}
-define i32 @test3(i32* nocapture readonly %buf, i32 %len) #0 {
+define i32 @test3(ptr nocapture readonly %buf, i32 %len) #0 {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[LEN:%.*]], 1
@@ -133,9 +134,9 @@ define i32 @test3(i32* nocapture readonly %buf, i32 %len) #0 {
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: [[S_012:%.*]] = phi i32 [ [[ADD:%.*]], [[WHILE_BODY]] ], [ undef, [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[INDVAR]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = mul nsw i64 [[INDVAR]], -1
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[BUF:%.*]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[SCEVGEP]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = mul nsw i64 [[INDVAR]], -4
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[BUF:%.*]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SCEVGEP]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP6]], [[S_012]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[TMP4]], [[TMP3]]
@@ -158,13 +159,13 @@ while.body: ; preds = %while.body.preheade
%i.013 = phi i32 [ %sub, %while.body ], [ %len, %while.body.preheader ]
%S.012 = phi i32 [ %add2, %while.body ], [ undef, %while.body.preheader ]
- %buf.addr.011 = phi i32* [ %add.ptr, %while.body ], [ %buf, %while.body.preheader ]
- %0 = load i32, i32* %buf.addr.011, align 4
+ %buf.addr.011 = phi ptr [ %add.ptr, %while.body ], [ %buf, %while.body.preheader ]
+ %0 = load i32, ptr %buf.addr.011, align 4
%add = add nsw i32 %0, %S.012
- %arrayidx1 = getelementptr inbounds i32, i32* %buf.addr.011, i64 -1
- %1 = load i32, i32* %arrayidx1, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %buf.addr.011, i64 -1
+ %1 = load i32, ptr %arrayidx1, align 4
%add2 = add nsw i32 %add, %1
- %add.ptr = getelementptr inbounds i32, i32* %buf.addr.011, i64 -2
+ %add.ptr = getelementptr inbounds i32, ptr %buf.addr.011, i64 -2
%sub = add nsw i32 %i.013, -2
%cmp = icmp sgt i32 %sub, 1
br i1 %cmp, label %while.body, label %while.end.loopexit
@@ -185,8 +186,8 @@ define i32 @test4(i32 %len) {
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[SUM44_020:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[WHILE_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[INDVAR]] to i32
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr [16 x i8], [16 x i8]* @aaa, i64 0, i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[SCEVGEP]], align 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr @aaa, i64 [[INDVAR]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[SCEVGEP]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP1]] to i64
; CHECK-NEXT: [[ADD]] = add i64 [[CONV]], [[SUM44_020]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
@@ -204,24 +205,24 @@ entry:
while.body:
%a = phi i32 [ 4, %entry ], [ %a.next, %while.body ]
%b = phi i32 [ 6, %entry ], [ %b.next, %while.body ]
- %buf.021 = phi i8* [ getelementptr inbounds ([16 x i8], [16 x i8]* @aaa, i64 0, i64 0), %entry ], [ %add.ptr, %while.body ]
+ %buf.021 = phi ptr [ @aaa, %entry ], [ %add.ptr, %while.body ]
%sum44.020 = phi i64 [ 0, %entry ], [ %add9, %while.body ]
- %0 = load i8, i8* %buf.021, align 1
+ %0 = load i8, ptr %buf.021, align 1
%conv = zext i8 %0 to i64
%add = add i64 %conv, %sum44.020
- %arrayidx1 = getelementptr inbounds i8, i8* %buf.021, i64 1
- %1 = load i8, i8* %arrayidx1, align 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %buf.021, i64 1
+ %1 = load i8, ptr %arrayidx1, align 1
%conv2 = zext i8 %1 to i64
%add3 = add i64 %add, %conv2
- %arrayidx4 = getelementptr inbounds i8, i8* %buf.021, i64 2
- %2 = load i8, i8* %arrayidx4, align 1
+ %arrayidx4 = getelementptr inbounds i8, ptr %buf.021, i64 2
+ %2 = load i8, ptr %arrayidx4, align 1
%conv5 = zext i8 %2 to i64
%add6 = add i64 %add3, %conv5
- %arrayidx7 = getelementptr inbounds i8, i8* %buf.021, i64 3
- %3 = load i8, i8* %arrayidx7, align 1
+ %arrayidx7 = getelementptr inbounds i8, ptr %buf.021, i64 3
+ %3 = load i8, ptr %arrayidx7, align 1
%conv8 = zext i8 %3 to i64
%add9 = add i64 %add6, %conv8
- %add.ptr = getelementptr inbounds i8, i8* %buf.021, i64 4
+ %add.ptr = getelementptr inbounds i8, ptr %buf.021, i64 4
%a.next = add nsw i32 %a, -1
%b.next = add nsw i32 %b, -1
%cond = add nsw i32 %a, %b
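A related, non-pointer-type change shows up in the SCEVExpander output above: with opaque pointers, expanded address arithmetic is emitted as an i8 GEP over a byte offset rather than a typed GEP over an element index. Roughly, for an i32 element, as in the test2 CHECK lines:

    ; typed pointers (old): element-indexed GEP
    %scevgep = getelementptr i32, i32* %a, i64 %indvar

    ; opaque pointers (new): byte-offset GEP
    %off = shl nuw nsw i64 %indvar, 2        ; index * 4 bytes
    %scevgep = getelementptr i8, ptr %a, i64 %off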
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/expander-crashes.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/expander-crashes.ll
index e857c7f36ac0b..29c03b88c5fb1 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/expander-crashes.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/expander-crashes.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 -loop-reduce %s -S | FileCheck %s
+; RUN: opt -loop-reduce %s -S | FileCheck %s
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.15.0"
@@ -8,34 +8,33 @@ target triple = "x86_64-apple-macosx10.15.0"
%struct.hoge = type { i32, i32, i32, i32 }
-define i64 @blam(%struct.hoge* %start, %struct.hoge* %end, %struct.hoge* %ptr.2) {
+define i64 @blam(ptr %start, ptr %end, ptr %ptr.2) {
; CHECK-LABEL: @blam(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[START1:%.*]] = ptrtoint %struct.hoge* [[START:%.*]] to i64
+; CHECK-NEXT: [[START1:%.*]] = ptrtoint ptr [[START:%.*]] to i64
; CHECK-NEXT: br label [[LOOP_1_HEADER:%.*]]
; CHECK: loop.1.header:
-; CHECK-NEXT: [[LSR_IV5:%.*]] = phi i64 [ [[LSR_IV_NEXT6:%.*]], [[LOOP_1_HEADER]] ], [ [[START1]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[IV:%.*]] = phi %struct.hoge* [ [[IV_NEXT:%.*]], [[LOOP_1_HEADER]] ], [ [[START]], [[ENTRY]] ]
-; CHECK-NEXT: [[IV_NEXT]] = getelementptr inbounds [[STRUCT_HOGE:%.*]], %struct.hoge* [[IV]], i64 1
-; CHECK-NEXT: [[LSR_IV_NEXT6]] = add nuw i64 [[LSR_IV5]], 16
-; CHECK-NEXT: [[EC:%.*]] = icmp eq %struct.hoge* [[IV_NEXT]], [[END:%.*]]
+; CHECK-NEXT: [[LSR_IV4:%.*]] = phi i64 [ [[LSR_IV_NEXT5:%.*]], [[LOOP_1_HEADER]] ], [ [[START1]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], [[LOOP_1_HEADER]] ], [ [[START]], [[ENTRY]] ]
+; CHECK-NEXT: [[IV_NEXT]] = getelementptr inbounds [[STRUCT_HOGE:%.*]], ptr [[IV]], i64 1
+; CHECK-NEXT: [[LSR_IV_NEXT5]] = add nuw i64 [[LSR_IV4]], 16
+; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV_NEXT]], [[END:%.*]]
; CHECK-NEXT: br i1 [[EC]], label [[LOOP_2_PH:%.*]], label [[LOOP_1_HEADER]]
; CHECK: loop.2.ph:
-; CHECK-NEXT: [[IV_NEXT_LCSSA:%.*]] = phi %struct.hoge* [ [[IV_NEXT]], [[LOOP_1_HEADER]] ]
-; CHECK-NEXT: [[LSR_IV_NEXT6_LCSSA:%.*]] = phi i64 [ [[LSR_IV_NEXT6]], [[LOOP_1_HEADER]] ]
+; CHECK-NEXT: [[IV_NEXT_LCSSA:%.*]] = phi ptr [ [[IV_NEXT]], [[LOOP_1_HEADER]] ]
+; CHECK-NEXT: [[LSR_IV_NEXT5_LCSSA:%.*]] = phi i64 [ [[LSR_IV_NEXT5]], [[LOOP_1_HEADER]] ]
; CHECK-NEXT: br label [[LOOP_2_HEADER:%.*]]
; CHECK: loop.2.header:
-; CHECK-NEXT: [[LSR_IV2:%.*]] = phi i64 [ [[LSR_IV_NEXT3:%.*]], [[LOOP_2_LATCH:%.*]] ], [ [[LSR_IV_NEXT6_LCSSA]], [[LOOP_2_PH]] ]
-; CHECK-NEXT: [[IV2:%.*]] = phi %struct.hoge* [ [[IV2_NEXT:%.*]], [[LOOP_2_LATCH]] ], [ [[IV_NEXT_LCSSA]], [[LOOP_2_PH]] ]
-; CHECK-NEXT: [[IV24:%.*]] = bitcast %struct.hoge* [[IV2]] to i32*
+; CHECK-NEXT: [[LSR_IV2:%.*]] = phi i64 [ [[LSR_IV_NEXT3:%.*]], [[LOOP_2_LATCH:%.*]] ], [ [[LSR_IV_NEXT5_LCSSA]], [[LOOP_2_PH]] ]
+; CHECK-NEXT: [[IV2:%.*]] = phi ptr [ [[IV2_NEXT:%.*]], [[LOOP_2_LATCH]] ], [ [[IV_NEXT_LCSSA]], [[LOOP_2_PH]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[LSR_IV2]], 12
; CHECK-NEXT: call void @use.i64(i64 [[TMP0]])
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[IV24]], i64 2
-; CHECK-NEXT: store i32 10, i32* [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EC_2:%.*]] = icmp ugt %struct.hoge* [[IV2]], [[PTR_2:%.*]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[IV2]], i64 8
+; CHECK-NEXT: store i32 10, ptr [[SCEVGEP]], align 8
+; CHECK-NEXT: [[EC_2:%.*]] = icmp ugt ptr [[IV2]], [[PTR_2:%.*]]
; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP_2_EXIT:%.*]], label [[LOOP_2_LATCH]]
; CHECK: loop.2.latch:
-; CHECK-NEXT: [[IV2_NEXT]] = getelementptr inbounds [[STRUCT_HOGE]], %struct.hoge* [[IV2]], i64 1
+; CHECK-NEXT: [[IV2_NEXT]] = getelementptr inbounds [[STRUCT_HOGE]], ptr [[IV2]], i64 1
; CHECK-NEXT: [[LSR_IV_NEXT3]] = add i64 [[LSR_IV2]], 16
; CHECK-NEXT: br label [[LOOP_2_HEADER]]
; CHECK: loop.2.exit:
@@ -45,30 +44,30 @@ entry:
br label %loop.1.header
loop.1.header:
- %iv = phi %struct.hoge* [ %iv.next, %loop.1.header ], [ %start, %entry ]
- %iv.next = getelementptr inbounds %struct.hoge, %struct.hoge* %iv, i64 1
- %ec = icmp eq %struct.hoge* %iv.next, %end
+ %iv = phi ptr [ %iv.next, %loop.1.header ], [ %start, %entry ]
+ %iv.next = getelementptr inbounds %struct.hoge, ptr %iv, i64 1
+ %ec = icmp eq ptr %iv.next, %end
br i1 %ec, label %loop.2.ph, label %loop.1.header
loop.2.ph:
br label %loop.2.header
loop.2.header:
- %iv2 = phi %struct.hoge* [ %iv2.next, %loop.2.latch ], [ %iv.next, %loop.2.ph ]
- %tmp7 = getelementptr inbounds %struct.hoge, %struct.hoge* %iv2, i64 0, i32 3
- %tmp8 = ptrtoint i32* %tmp7 to i64
+ %iv2 = phi ptr [ %iv2.next, %loop.2.latch ], [ %iv.next, %loop.2.ph ]
+ %tmp7 = getelementptr inbounds %struct.hoge, ptr %iv2, i64 0, i32 3
+ %tmp8 = ptrtoint ptr %tmp7 to i64
call void @use.i64(i64 %tmp8)
- %tmp9 = getelementptr inbounds %struct.hoge, %struct.hoge* %iv2, i64 0, i32 2
- store i32 10, i32* %tmp9, align 8
- %ec.2 = icmp ugt %struct.hoge* %iv2, %ptr.2
+ %tmp9 = getelementptr inbounds %struct.hoge, ptr %iv2, i64 0, i32 2
+ store i32 10, ptr %tmp9, align 8
+ %ec.2 = icmp ugt ptr %iv2, %ptr.2
br i1 %ec.2, label %loop.2.exit, label %loop.2.latch
loop.2.latch:
- %iv2.next = getelementptr inbounds %struct.hoge, %struct.hoge* %iv2, i64 1
+ %iv2.next = getelementptr inbounds %struct.hoge, ptr %iv2, i64 1
br label %loop.2.header
loop.2.exit: ; preds = %bb6
- %iv2.cast = ptrtoint %struct.hoge* %iv2 to i64
+ %iv2.cast = ptrtoint ptr %iv2 to i64
ret i64 %iv2.cast
}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/nested-ptr-addrec.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/nested-ptr-addrec.ll
index 0fefb48fcdc7d..a500cdc57298f 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/nested-ptr-addrec.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/nested-ptr-addrec.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 -S -loop-reduce < %s | FileCheck %s
+; RUN: opt -S -loop-reduce < %s | FileCheck %s
; Test an assertion failure from D113349, where the SCEV for the outer phi
; gets computed and registered in the value map while attempting to compute it.
@@ -12,30 +12,29 @@ define void @test() {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64* [ [[SCEVGEP:%.*]], [[LOOP_LATCH:%.*]] ], [ inttoptr (i64 -8 to i64*), [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[LOOP_LATCH:%.*]] ], [ inttoptr (i64 -8 to ptr), [[ENTRY:%.*]] ]
; CHECK-NEXT: br i1 true, label [[LOOP_EXIT:%.*]], label [[LOOP2_PREHEADER:%.*]]
; CHECK: loop.exit:
; CHECK-NEXT: ret void
; CHECK: loop2.preheader:
; CHECK-NEXT: br label [[LOOP2_HEADER:%.*]]
; CHECK: loop2.header:
-; CHECK-NEXT: [[LSR_IV1:%.*]] = phi i64* [ [[SCEVGEP2:%.*]], [[LOOP2_HEADER]] ], [ [[LSR_IV]], [[LOOP2_PREHEADER]] ]
-; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i64, i64* [[LSR_IV1]], i64 1
-; CHECK-NEXT: [[SCEVGEP23:%.*]] = bitcast i64* [[SCEVGEP2]] to i8*
+; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP2:%.*]], [[LOOP2_HEADER]] ], [ [[LSR_IV]], [[LOOP2_PREHEADER]] ]
+; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 8
; CHECK-NEXT: br i1 false, label [[LOOP2_HEADER]], label [[LOOP2_CONT:%.*]]
; CHECK: loop2.cont:
-; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SCEVGEP23]], align 1
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[SCEVGEP2]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[V]], 0
; CHECK-NEXT: br i1 [[C]], label [[LOOP_EXIT]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[SCEVGEP]] = getelementptr i64, i64* [[LSR_IV]], i64 1
+; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 8
; CHECK-NEXT: br label [[LOOP_HEADER]]
;
entry:
br label %loop.header
loop.header:
- %ptr = phi i64* [ %ptr.next, %loop.latch ], [ null, %entry ]
+ %ptr = phi ptr [ %ptr.next, %loop.latch ], [ null, %entry ]
br i1 true, label %loop.exit, label %loop2.preheader
loop.exit:
@@ -45,17 +44,16 @@ loop2.preheader:
br label %loop2.header
loop2.header:
- %ptr2 = phi i64* [ %ptr, %loop2.preheader ], [ %ptr2.next, %loop2.header ]
- %ptr2.next = getelementptr inbounds i64, i64* %ptr2, i64 1
+ %ptr2 = phi ptr [ %ptr, %loop2.preheader ], [ %ptr2.next, %loop2.header ]
+ %ptr2.next = getelementptr inbounds i64, ptr %ptr2, i64 1
br i1 false, label %loop2.header, label %loop2.cont
loop2.cont:
- %ptr2.i8 = bitcast i64* %ptr2 to i8*
- %v = load i8, i8* %ptr2.i8
+ %v = load i8, ptr %ptr2
%c = icmp ne i8 %v, 0
br i1 %c, label %loop.exit, label %loop.latch
loop.latch:
- %ptr.next = getelementptr inbounds i64, i64* %ptr, i64 1
+ %ptr.next = getelementptr inbounds i64, ptr %ptr, i64 1
br label %loop.header
}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
index fbaa595e4b91e..f73f92246fff2 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 -loop-reduce -S < %s | FileCheck %s
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
; PR9939
; LSR should properly handle the post-inc offset when folding the
@@ -7,138 +7,127 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-%struct.Vector2 = type { i16*, [64 x i16], i32 }
+%struct.Vector2 = type { ptr, [64 x i16], i32 }
@.str = private unnamed_addr constant [37 x i8] c"0123456789abcdefghijklmnopqrstuvwxyz\00"
-define void @_Z15IntegerToStringjjR7Vector2(i32 %i, i32 %radix, %struct.Vector2* nocapture %result) nounwind noinline {
+define void @_Z15IntegerToStringjjR7Vector2(i32 %i, i32 %radix, ptr nocapture %result) nounwind noinline {
; CHECK-LABEL: @_Z15IntegerToStringjjR7Vector2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BUFFER:%.*]] = alloca [33 x i16], align 16
-; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds [33 x i16], [33 x i16]* [[BUFFER]], i64 0, i64 33
-; CHECK-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint i16* [[ADD_PTR]] to i64
-; CHECK-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint i16* [[ADD_PTR]] to i64
-; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr [33 x i16], [33 x i16]* [[BUFFER]], i64 0, i64 32
-; CHECK-NEXT: [[SCEVGEP56:%.*]] = bitcast i16* [[SCEVGEP5]] to [33 x i16]*
-; CHECK-NEXT: [[SCEVGEP12:%.*]] = getelementptr [33 x i16], [33 x i16]* [[BUFFER]], i64 1, i64 0
-; CHECK-NEXT: [[SCEVGEP1213:%.*]] = bitcast i16* [[SCEVGEP12]] to [33 x i16]*
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr [33 x i16], ptr [[BUFFER]], i64 0, i64 33
+; CHECK-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[ADD_PTR]] to i64
+; CHECK-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[ADD_PTR]] to i64
+; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[BUFFER]], i64 64
; CHECK-NEXT: br label [[DO_BODY:%.*]]
; CHECK: do.body:
-; CHECK-NEXT: [[LSR_IV16:%.*]] = phi i64 [ [[LSR_IV_NEXT17:%.*]], [[DO_BODY]] ], [ -1, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[LSR_IV14:%.*]] = phi [33 x i16]* [ [[TMP2:%.*]], [[DO_BODY]] ], [ [[SCEVGEP1213]], [[ENTRY]] ]
-; CHECK-NEXT: [[LSR_IV7:%.*]] = phi [33 x i16]* [ [[TMP1:%.*]], [[DO_BODY]] ], [ [[SCEVGEP56]], [[ENTRY]] ]
+; CHECK-NEXT: [[LSR_IV10:%.*]] = phi i64 [ [[LSR_IV_NEXT11:%.*]], [[DO_BODY]] ], [ -1, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[LSR_IV8:%.*]] = phi ptr [ [[SCEVGEP9:%.*]], [[DO_BODY]] ], [ [[ADD_PTR]], [[ENTRY]] ]
+; CHECK-NEXT: [[LSR_IV4:%.*]] = phi ptr [ [[SCEVGEP5:%.*]], [[DO_BODY]] ], [ [[SCEVGEP3]], [[ENTRY]] ]
; CHECK-NEXT: [[I_ADDR_0:%.*]] = phi i32 [ [[DIV:%.*]], [[DO_BODY]] ], [ [[I:%.*]], [[ENTRY]] ]
-; CHECK-NEXT: [[LSR_IV718:%.*]] = bitcast [33 x i16]* [[LSR_IV7]] to i16*
; CHECK-NEXT: [[REM:%.*]] = urem i32 [[I_ADDR_0]], 10
; CHECK-NEXT: [[DIV]] = udiv i32 [[I_ADDR_0]], 10
; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[REM]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [37 x i8], [37 x i8]* @.str, i64 0, i64 [[IDXPROM]]
-; CHECK-NEXT: [[INST5:%.*]] = load i8, i8* [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [37 x i8], ptr @.str, i64 0, i64 [[IDXPROM]]
+; CHECK-NEXT: [[INST5:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[INST5]] to i16
-; CHECK-NEXT: store i16 [[CONV]], i16* [[LSR_IV718]], align 2
+; CHECK-NEXT: store i16 [[CONV]], ptr [[LSR_IV4]], align 2
; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt i32 [[I_ADDR_0]], 9
-; CHECK-NEXT: [[SCEVGEP8:%.*]] = getelementptr [33 x i16], [33 x i16]* [[LSR_IV7]], i64 0, i64 -1
-; CHECK-NEXT: [[TMP1]] = bitcast i16* [[SCEVGEP8]] to [33 x i16]*
-; CHECK-NEXT: [[SCEVGEP15:%.*]] = getelementptr [33 x i16], [33 x i16]* [[LSR_IV14]], i64 0, i64 -1
-; CHECK-NEXT: [[TMP2]] = bitcast i16* [[SCEVGEP15]] to [33 x i16]*
-; CHECK-NEXT: [[LSR_IV_NEXT17]] = add i64 [[LSR_IV16]], 1
+; CHECK-NEXT: [[SCEVGEP5]] = getelementptr i8, ptr [[LSR_IV4]], i64 -2
+; CHECK-NEXT: [[SCEVGEP9]] = getelementptr i8, ptr [[LSR_IV8]], i64 -2
+; CHECK-NEXT: [[LSR_IV_NEXT11]] = add i64 [[LSR_IV10]], 1
; CHECK-NEXT: br i1 [[TMP0]], label [[DO_BODY]], label [[DO_END:%.*]]
; CHECK: do.end:
-; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi [33 x i16]* [ [[TMP2]], [[DO_BODY]] ]
-; CHECK-NEXT: [[XAP_0:%.*]] = inttoptr i64 [[LSR_IV_NEXT17]] to i1*
-; CHECK-NEXT: [[CAP_0:%.*]] = ptrtoint i1* [[XAP_0]] to i64
+; CHECK-NEXT: [[SCEVGEP9_LCSSA:%.*]] = phi ptr [ [[SCEVGEP9]], [[DO_BODY]] ]
+; CHECK-NEXT: [[XAP_0:%.*]] = inttoptr i64 [[LSR_IV_NEXT11]] to ptr
+; CHECK-NEXT: [[CAP_0:%.*]] = ptrtoint ptr [[XAP_0]] to i64
; CHECK-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
; CHECK-NEXT: [[SUB_PTR_DIV39:%.*]] = lshr exact i64 [[SUB_PTR_SUB]], 1
; CHECK-NEXT: [[CONV11:%.*]] = trunc i64 [[SUB_PTR_DIV39]] to i32
-; CHECK-NEXT: [[MLENGTH:%.*]] = getelementptr inbounds [[STRUCT_VECTOR2:%.*]], %struct.Vector2* [[RESULT:%.*]], i64 0, i32 2
+; CHECK-NEXT: [[MLENGTH:%.*]] = getelementptr inbounds [[STRUCT_VECTOR2:%.*]], ptr [[RESULT:%.*]], i64 0, i32 2
; CHECK-NEXT: [[IDX_EXT21:%.*]] = bitcast i64 [[SUB_PTR_DIV39]] to i64
; CHECK-NEXT: [[CMP2740:%.*]] = icmp eq i64 [[IDX_EXT21]], 0
; CHECK-NEXT: br i1 [[CMP2740]], label [[FOR_END:%.*]], label [[FOR_BODY_LR_PH:%.*]]
; CHECK: for.body.lr.ph:
-; CHECK-NEXT: [[INST16:%.*]] = load i32, i32* [[MLENGTH]], align 4
-; CHECK-NEXT: [[MBEGIN:%.*]] = getelementptr inbounds [[STRUCT_VECTOR2]], %struct.Vector2* [[RESULT]], i64 0, i32 0
-; CHECK-NEXT: [[INST14:%.*]] = load i16*, i16** [[MBEGIN]], align 8
+; CHECK-NEXT: [[INST16:%.*]] = load i32, ptr [[MLENGTH]], align 4
+; CHECK-NEXT: [[INST14:%.*]] = load ptr, ptr [[RESULT]], align 8
; CHECK-NEXT: [[INST48:%.*]] = zext i32 [[INST16]] to i64
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, i16* [[INST14]], i64 [[INST48]]
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[INST48]], 1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[INST14]], i64 [[TMP1]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[LSR_IV9:%.*]] = phi [33 x i16]* [ [[TMP3:%.*]], [[FOR_BODY]] ], [ [[DOTLCSSA]], [[FOR_BODY_LR_PH]] ]
+; CHECK-NEXT: [[LSR_IV6:%.*]] = phi ptr [ [[SCEVGEP7:%.*]], [[FOR_BODY]] ], [ [[SCEVGEP9_LCSSA]], [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_LR_PH]] ]
-; CHECK-NEXT: [[LSR_IV911:%.*]] = bitcast [33 x i16]* [[LSR_IV9]] to i16*
-; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, i8* [[SCEVGEP1]], i64 [[LSR_IV]]
-; CHECK-NEXT: [[SCEVGEP23:%.*]] = bitcast i8* [[SCEVGEP2]] to i16*
-; CHECK-NEXT: [[INST29:%.*]] = load i16, i16* [[LSR_IV911]], align 2
-; CHECK-NEXT: store i16 [[INST29]], i16* [[SCEVGEP23]], align 2
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[LSR_IV]]
+; CHECK-NEXT: [[INST29:%.*]] = load i16, ptr [[LSR_IV6]], align 2
+; CHECK-NEXT: store i16 [[INST29]], ptr [[SCEVGEP1]], align 2
; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], 2
-; CHECK-NEXT: [[LSR_IV_NEXT4:%.*]] = inttoptr i64 [[LSR_IV_NEXT]] to i16*
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr [33 x i16], [33 x i16]* [[LSR_IV9]], i64 0, i64 1
-; CHECK-NEXT: [[TMP3]] = bitcast i16* [[SCEVGEP10]] to [33 x i16]*
-; CHECK-NEXT: [[CMP27:%.*]] = icmp eq i16* [[LSR_IV_NEXT4]], null
+; CHECK-NEXT: [[LSR_IV_NEXT2:%.*]] = inttoptr i64 [[LSR_IV_NEXT]] to ptr
+; CHECK-NEXT: [[SCEVGEP7]] = getelementptr i8, ptr [[LSR_IV6]], i64 2
+; CHECK-NEXT: [[CMP27:%.*]] = icmp eq ptr [[LSR_IV_NEXT2]], null
; CHECK-NEXT: br i1 [[CMP27]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
-; CHECK-NEXT: [[INST38:%.*]] = load i32, i32* [[MLENGTH]], align 4
+; CHECK-NEXT: [[INST38:%.*]] = load i32, ptr [[MLENGTH]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[INST38]], [[CONV11]]
-; CHECK-NEXT: store i32 [[ADD]], i32* [[MLENGTH]], align 4
+; CHECK-NEXT: store i32 [[ADD]], ptr [[MLENGTH]], align 4
; CHECK-NEXT: ret void
;
entry:
%buffer = alloca [33 x i16], align 16
- %add.ptr = getelementptr inbounds [33 x i16], [33 x i16]* %buffer, i64 0, i64 33
- %sub.ptr.lhs.cast = ptrtoint i16* %add.ptr to i64
- %sub.ptr.rhs.cast = ptrtoint i16* %add.ptr to i64
+ %add.ptr = getelementptr inbounds [33 x i16], ptr %buffer, i64 0, i64 33
+ %sub.ptr.lhs.cast = ptrtoint ptr %add.ptr to i64
+ %sub.ptr.rhs.cast = ptrtoint ptr %add.ptr to i64
br label %do.body
do.body: ; preds = %do.body, %entry
%0 = phi i64 [ %indvar.next44, %do.body ], [ 0, %entry ]
%i.addr.0 = phi i32 [ %div, %do.body ], [ %i, %entry ]
%inst51 = sub i64 32, %0
- %incdec.ptr = getelementptr [33 x i16], [33 x i16]* %buffer, i64 0, i64 %inst51
+ %incdec.ptr = getelementptr [33 x i16], ptr %buffer, i64 0, i64 %inst51
%rem = urem i32 %i.addr.0, 10
%div = udiv i32 %i.addr.0, 10
%idxprom = zext i32 %rem to i64
- %arrayidx = getelementptr inbounds [37 x i8], [37 x i8]* @.str, i64 0, i64 %idxprom
- %inst5 = load i8, i8* %arrayidx, align 1
+ %arrayidx = getelementptr inbounds [37 x i8], ptr @.str, i64 0, i64 %idxprom
+ %inst5 = load i8, ptr %arrayidx, align 1
%conv = sext i8 %inst5 to i16
- store i16 %conv, i16* %incdec.ptr, align 2
+ store i16 %conv, ptr %incdec.ptr, align 2
%1 = icmp ugt i32 %i.addr.0, 9
%indvar.next44 = add i64 %0, 1
br i1 %1, label %do.body, label %do.end
do.end: ; preds = %do.body
- %xap.0 = inttoptr i64 %0 to i1*
- %cap.0 = ptrtoint i1* %xap.0 to i64
+ %xap.0 = inttoptr i64 %0 to ptr
+ %cap.0 = ptrtoint ptr %xap.0 to i64
%sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
%sub.ptr.div39 = lshr exact i64 %sub.ptr.sub, 1
%conv11 = trunc i64 %sub.ptr.div39 to i32
- %mLength = getelementptr inbounds %struct.Vector2, %struct.Vector2* %result, i64 0, i32 2
+ %mLength = getelementptr inbounds %struct.Vector2, ptr %result, i64 0, i32 2
%idx.ext21 = bitcast i64 %sub.ptr.div39 to i64
%incdec.ptr.sum = add i64 %idx.ext21, -1
%cp.0.sum = sub i64 %incdec.ptr.sum, %0
- %add.ptr22 = getelementptr [33 x i16], [33 x i16]* %buffer, i64 1, i64 %cp.0.sum
+ %add.ptr22 = getelementptr [33 x i16], ptr %buffer, i64 1, i64 %cp.0.sum
%cmp2740 = icmp eq i64 %idx.ext21, 0
br i1 %cmp2740, label %for.end, label %for.body.lr.ph
for.body.lr.ph: ; preds = %do.end
- %inst16 = load i32, i32* %mLength, align 4
- %mBegin = getelementptr inbounds %struct.Vector2, %struct.Vector2* %result, i64 0, i32 0
- %inst14 = load i16*, i16** %mBegin, align 8
+ %inst16 = load i32, ptr %mLength, align 4
+ %inst14 = load ptr, ptr %result, align 8
%inst48 = zext i32 %inst16 to i64
br label %for.body
for.body: ; preds = %for.body, %for.body.lr.ph
%indvar = phi i64 [ 0, %for.body.lr.ph ], [ %indvar.next, %for.body ]
%inst46 = add i64 %inst51, %indvar
- %p.042 = getelementptr [33 x i16], [33 x i16]* %buffer, i64 0, i64 %inst46
+ %p.042 = getelementptr [33 x i16], ptr %buffer, i64 0, i64 %inst46
%inst47 = sub i64 %indvar, %0
- %incdec.ptr32 = getelementptr [33 x i16], [33 x i16]* %buffer, i64 1, i64 %inst47
+ %incdec.ptr32 = getelementptr [33 x i16], ptr %buffer, i64 1, i64 %inst47
%inst49 = add i64 %inst48, %indvar
- %dst.041 = getelementptr i16, i16* %inst14, i64 %inst49
- %inst29 = load i16, i16* %p.042, align 2
- store i16 %inst29, i16* %dst.041, align 2
- %cmp27 = icmp eq i16* %incdec.ptr32, %add.ptr22
+ %dst.041 = getelementptr i16, ptr %inst14, i64 %inst49
+ %inst29 = load i16, ptr %p.042, align 2
+ store i16 %inst29, ptr %dst.041, align 2
+ %cmp27 = icmp eq ptr %incdec.ptr32, %add.ptr22
%indvar.next = add i64 %indvar, 1
br i1 %cmp27, label %for.end.loopexit, label %for.body
@@ -146,8 +135,8 @@ for.end.loopexit: ; preds = %for.body
br label %for.end
for.end: ; preds = %for.end.loopexit, %do.end
- %inst38 = load i32, i32* %mLength, align 4
+ %inst38 = load i32, ptr %mLength, align 4
%add = add i32 %inst38, %conv11
- store i32 %add, i32* %mLength, align 4
+ store i32 %add, ptr %mLength, align 4
ret void
}
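The hunk above is the recurring shape of the whole conversion: with typed pointers, SCEVExpander stepped an induction pointer by indexing the [33 x i16] array and then bitcasting the result back to the phi's type; with opaque pointers the same step collapses to one byte-offset GEP and the bitcasts vanish. A minimal sketch of the two forms (the %cursor names are illustrative, not taken from the test):

  ; Typed-pointer expansion: index by elements, then bitcast so the
  ; result matches the phi's array-pointer type.
  %step = getelementptr [33 x i16], [33 x i16]* %cursor, i64 0, i64 -1
  %next.typed = bitcast i16* %step to [33 x i16]*

  ; Opaque-pointer expansion: one GEP, offset in bytes (-2 = one i16).
  %next.opaque = getelementptr i8, ptr %cursor, i64 -2

The same collapse is what lets the mBegin GEP at field 0 disappear above: a GEP to offset zero is a no-op on ptr, so the load now reads %result directly.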
diff --git a/llvm/test/Transforms/LoopStrengthReduce/post-increment-insertion.ll b/llvm/test/Transforms/LoopStrengthReduce/post-increment-insertion.ll
index aaba805baabaa..d8861f44cabfd 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/post-increment-insertion.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/post-increment-insertion.ll
@@ -1,23 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -S | FileCheck %s
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
; REQUIRES: x86-registered-target
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2"
target triple = "x86_64-unknown-linux-gnu"
; FIXME: iv.next is supposed to be inserted in the backedge.
-define i32 @test_01(i32* %p, i64 %len, i32 %x) {
+define i32 @test_01(ptr %p, i64 %len, i32 %x) {
; CHECK-LABEL: @test_01(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 -1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 -4
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ], [ [[LEN:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i64 [[IV]], 0
; CHECK-NEXT: br i1 [[COND_1]], label [[EXIT:%.*]], label [[BACKEDGE]]
; CHECK: backedge:
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[SCEVGEP]], i64 [[IV]]
-; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, i32* [[SCEVGEP1]] unordered, align 4
+; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[IV]], 2
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP0]]
+; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, ptr [[SCEVGEP1]] unordered, align 4
; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]]
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
; CHECK-NEXT: br i1 [[COND_2]], label [[FAILURE:%.*]], label [[LOOP]]
@@ -36,8 +37,8 @@ loop: ; preds = %backedge, %entry
br i1 %cond_1, label %exit, label %backedge
backedge: ; preds = %loop
- %addr = getelementptr inbounds i32, i32* %p, i64 %iv.next
- %loaded = load atomic i32, i32* %addr unordered, align 4
+ %addr = getelementptr inbounds i32, ptr %p, i64 %iv.next
+ %loaded = load atomic i32, ptr %addr unordered, align 4
%cond_2 = icmp eq i32 %loaded, %x
br i1 %cond_2, label %failure, label %loop
@@ -48,18 +49,19 @@ failure: ; preds = %backedge
unreachable
}
-define i32 @test_02(i32* %p, i64 %len, i32 %x) {
+define i32 @test_02(ptr %p, i64 %len, i32 %x) {
; CHECK-LABEL: @test_02(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 -1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 -4
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[BACKEDGE:%.*]] ], [ [[LEN:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i64 [[LSR_IV]], 0
; CHECK-NEXT: br i1 [[COND_1]], label [[EXIT:%.*]], label [[BACKEDGE]]
; CHECK: backedge:
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[SCEVGEP]], i64 [[LSR_IV]]
-; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, i32* [[SCEVGEP1]] unordered, align 4
+; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[LSR_IV]], 2
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP0]]
+; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, ptr [[SCEVGEP1]] unordered, align 4
; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]]
; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], -1
; CHECK-NEXT: br i1 [[COND_2]], label [[FAILURE:%.*]], label [[LOOP]]
@@ -81,8 +83,8 @@ loop: ; preds = %backedge, %entry
br i1 %cond_1, label %exit, label %backedge
backedge: ; preds = %loop
- %addr = getelementptr inbounds i32, i32* %p, i64 %iv.next.offset
- %loaded = load atomic i32, i32* %addr unordered, align 4
+ %addr = getelementptr inbounds i32, ptr %p, i64 %iv.next.offset
+ %loaded = load atomic i32, ptr %addr unordered, align 4
%cond_2 = icmp eq i32 %loaded, %x
br i1 %cond_2, label %failure, label %loop
@@ -93,18 +95,19 @@ failure: ; preds = %backedge
unreachable
}
-define i32 @test_03(i32* %p, i64 %len, i32 %x) {
+define i32 @test_03(ptr %p, i64 %len, i32 %x) {
; CHECK-LABEL: @test_03(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 -1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 -4
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[BACKEDGE:%.*]] ], [ [[LEN:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i64 [[LSR_IV]], 0
; CHECK-NEXT: br i1 [[COND_1]], label [[EXIT:%.*]], label [[BACKEDGE]]
; CHECK: backedge:
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[SCEVGEP]], i64 [[LSR_IV]]
-; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, i32* [[SCEVGEP1]] unordered, align 4
+; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[LSR_IV]], 2
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP0]]
+; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, ptr [[SCEVGEP1]] unordered, align 4
; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]]
; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], -1
; CHECK-NEXT: br i1 [[COND_2]], label [[FAILURE:%.*]], label [[LOOP]]
@@ -126,8 +129,8 @@ loop: ; preds = %backedge, %entry
br i1 %cond_1, label %exit, label %backedge
backedge: ; preds = %loop
- %addr = getelementptr inbounds i32, i32* %p, i64 %iv.next.offset
- %loaded = load atomic i32, i32* %addr unordered, align 4
+ %addr = getelementptr inbounds i32, ptr %p, i64 %iv.next.offset
+ %loaded = load atomic i32, ptr %addr unordered, align 4
%cond_2 = icmp eq i32 %loaded, %x
br i1 %cond_2, label %failure, label %loop
@@ -138,18 +141,19 @@ failure: ; preds = %backedge
unreachable
}
-define i32 @test_04(i32* %p, i64 %len, i32 %x) {
+define i32 @test_04(ptr %p, i64 %len, i32 %x) {
; CHECK-LABEL: @test_04(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 -1
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 -4
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ], [ [[LEN:%.*]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i64 [[IV]], 0
; CHECK-NEXT: br i1 [[COND_1]], label [[EXIT:%.*]], label [[BACKEDGE]]
; CHECK: backedge:
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[SCEVGEP]], i64 [[IV]]
-; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, i32* [[SCEVGEP1]] unordered, align 4
+; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[IV]], 2
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP0]]
+; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, ptr [[SCEVGEP1]] unordered, align 4
; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]]
; CHECK-NEXT: [[IV_NEXT]] = sub i64 [[IV]], 1
; CHECK-NEXT: br i1 [[COND_2]], label [[FAILURE:%.*]], label [[LOOP]]
@@ -168,8 +172,8 @@ loop: ; preds = %backedge, %entry
br i1 %cond_1, label %exit, label %backedge
backedge: ; preds = %loop
- %addr = getelementptr inbounds i32, i32* %p, i64 %iv.next
- %loaded = load atomic i32, i32* %addr unordered, align 4
+ %addr = getelementptr inbounds i32, ptr %p, i64 %iv.next
+ %loaded = load atomic i32, ptr %addr unordered, align 4
%cond_2 = icmp eq i32 %loaded, %x
br i1 %cond_2, label %failure, label %loop
@@ -180,7 +184,7 @@ failure: ; preds = %backedge
unreachable
}
-define i32 @test_05(i32* %p, i64 %len, i32 %x) {
+define i32 @test_05(ptr %p, i64 %len, i32 %x) {
; CHECK-LABEL: @test_05(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
@@ -190,8 +194,8 @@ define i32 @test_05(i32* %p, i64 %len, i32 %x) {
; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i64 [[IV]], 0
; CHECK-NEXT: br i1 [[COND_1]], label [[EXIT:%.*]], label [[BACKEDGE]]
; CHECK: backedge:
-; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[IV_NEXT]]
-; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, i32* [[ADDR]] unordered, align 4
+; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[P:%.*]], i64 [[IV_NEXT]]
+; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, ptr [[ADDR]] unordered, align 4
; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]]
; CHECK-NEXT: br i1 [[COND_2]], label [[FAILURE:%.*]], label [[LOOP]]
; CHECK: exit:
@@ -209,8 +213,8 @@ loop: ; preds = %backedge, %entry
br i1 %cond_1, label %exit, label %backedge
backedge: ; preds = %loop
- %addr = getelementptr inbounds i32, i32* %p, i64 %iv.next
- %loaded = load atomic i32, i32* %addr unordered, align 4
+ %addr = getelementptr inbounds i32, ptr %p, i64 %iv.next
+ %loaded = load atomic i32, ptr %addr unordered, align 4
%cond_2 = icmp eq i32 %loaded, %x
br i1 %cond_2, label %failure, label %loop
@@ -221,10 +225,11 @@ failure: ; preds = %backedge
unreachable
}
-define i32 @test_06(i32* %p, i64 %len, i32 %x, i64 %step) {
+define i32 @test_06(ptr %p, i64 %len, i32 %x, i64 %step) {
; CHECK-LABEL: @test_06(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[P:%.*]], i64 [[STEP:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[STEP:%.*]], 2
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP0]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ], [ [[LEN:%.*]], [[ENTRY:%.*]] ]
@@ -232,8 +237,9 @@ define i32 @test_06(i32* %p, i64 %len, i32 %x, i64 %step) {
; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i64 [[STEP]], [[IV_NEXT]]
; CHECK-NEXT: br i1 [[COND_1]], label [[EXIT:%.*]], label [[BACKEDGE]]
; CHECK: backedge:
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[SCEVGEP]], i64 [[IV]]
-; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, i32* [[SCEVGEP1]] unordered, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IV]], 2
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP1]]
+; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, ptr [[SCEVGEP1]] unordered, align 4
; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]]
; CHECK-NEXT: br i1 [[COND_2]], label [[FAILURE:%.*]], label [[LOOP]]
; CHECK: exit:
@@ -251,8 +257,8 @@ loop: ; preds = %backedge, %entry
br i1 %cond_1, label %exit, label %backedge
backedge: ; preds = %loop
- %addr = getelementptr inbounds i32, i32* %p, i64 %iv.next
- %loaded = load atomic i32, i32* %addr unordered, align 4
+ %addr = getelementptr inbounds i32, ptr %p, i64 %iv.next
+ %loaded = load atomic i32, ptr %addr unordered, align 4
%cond_2 = icmp eq i32 %loaded, %x
br i1 %cond_2, label %failure, label %loop
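Every test in this file changes the same way: a typed getelementptr i32 scales its index by the element size implicitly, while the opaque form spells the scaling out as a shift into a byte offset. Sketched side by side, with illustrative result names and assuming the usual 4-byte i32:

  ; Typed: %iv counts i32 elements; the GEP multiplies by 4 internally.
  %addr.typed = getelementptr i32, i32* %p, i64 %iv

  ; Opaque: the multiply is a visible shl and the GEP steps raw bytes.
  %iv.bytes = shl i64 %iv, 2
  %addr.opaque = getelementptr i8, ptr %p, i64 %iv.bytes

Constant offsets fold the same way, which is why the entry-block GEP of i64 -1 elements in the old checks reappears as a GEP of i64 -4 bytes.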
diff --git a/llvm/test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll b/llvm/test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll
index 6e1d4d4ffd1c6..d6660f806b29d 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/preserve-gep-loop-variant.ll
@@ -1,4 +1,4 @@
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -S | FileCheck %s
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
; CHECK-NOT: {{inttoptr|ptrtoint}}
; CHECK: scevgep
; CHECK-NOT: {{inttoptr|ptrtoint}}
@@ -6,7 +6,7 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
; Indvars shouldn't need inttoptr/ptrtoint to expand an address here.
-define void @foo(i8* %p) nounwind {
+define void @foo(ptr %p) nounwind {
entry:
br i1 true, label %bb.nph, label %for.end
@@ -28,8 +28,8 @@ for.body:
%conv3 = sext i8 %conv to i64
%add = add nsw i64 %call, %storemerge1
%add4 = add nsw i64 %add, %conv3
- %arrayidx = getelementptr inbounds i8, i8* %p, i64 %add4
- store i8 0, i8* %arrayidx
+ %arrayidx = getelementptr inbounds i8, ptr %p, i64 %add4
+ store i8 0, ptr %arrayidx
%inc = add nsw i64 %storemerge1, 1
br label %for.cond
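The CHECK-NOT lines at the top of this file guard against exactly the expansion the test was written to prevent: materializing the address through integer casts rather than a GEP. Roughly, and with illustrative value names around the test's own %p and %add4, the forbidden and expected forms are:

  ; Forbidden: round-tripping the address computation through i64.
  %base = ptrtoint ptr %p to i64
  %sum = add i64 %base, %add4
  %addr = inttoptr i64 %sum to ptr

  ; Expected: a plain scevgep that keeps pointer provenance.
  %scevgep = getelementptr i8, ptr %p, i64 %add4

Nothing about that property depends on pointer element types, so the conversion here only touches syntax.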
diff --git a/llvm/test/Transforms/LoopStrengthReduce/uglygep.ll b/llvm/test/Transforms/LoopStrengthReduce/uglygep.ll
index a9052d55cdd2d..2d7c3cb3c5ff4 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/uglygep.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/uglygep.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 < %s -loop-reduce -S | FileCheck %s
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
; LSR shouldn't consider %t8 to be an interesting user of %t6, and it
; should be able to form pretty GEPs.
@@ -9,9 +9,9 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Check that LSR hoists %t2 computation outside the loop,
; folds %t3's add within the address
; and uses the induction variable (%t4) to access the right element.
-define void @test(i8* %ptr.i8, float** %ptr.float) {
+define void @test(ptr %ptr.i8, ptr %ptr.float) {
; CHECK-LABEL: define void @test
-; CHECK-SAME: (i8* [[PTR_I8:%.*]], float** [[PTR_FLOAT:%.*]]) {
+; CHECK-SAME: (ptr [[PTR_I8:%.*]], ptr [[PTR_FLOAT:%.*]]) {
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb1:
@@ -24,15 +24,14 @@ define void @test(i8* %ptr.i8, float** %ptr.float) {
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb10:
; CHECK-NEXT: [[T7:%.*]] = icmp eq i64 [[T4]], 0
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[PTR_I8]], i64 [[T4]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[PTR_I8]], i64 [[T4]]
; CHECK-NEXT: br label [[BB14:%.*]]
; CHECK: bb14:
-; CHECK-NEXT: store i8 undef, i8* [[SCEVGEP]], align 1
-; CHECK-NEXT: [[T6:%.*]] = load float*, float** [[PTR_FLOAT]], align 8
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr float, float* [[T6]], i64 4
-; CHECK-NEXT: [[SCEVGEP12:%.*]] = bitcast float* [[SCEVGEP1]] to i8*
-; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, i8* [[SCEVGEP12]], i64 [[T4]]
-; CHECK-NEXT: store i8 undef, i8* [[SCEVGEP3]], align 1
+; CHECK-NEXT: store i8 undef, ptr [[SCEVGEP]], align 1
+; CHECK-NEXT: [[T6:%.*]] = load ptr, ptr [[PTR_FLOAT]], align 8
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[T6]], i64 16
+; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SCEVGEP1]], i64 [[T4]]
+; CHECK-NEXT: store i8 undef, ptr [[SCEVGEP2]], align 1
; CHECK-NEXT: br label [[BB14]]
;
bb:
@@ -55,12 +54,11 @@ bb10: ; preds = %bb9
br label %bb14
bb14: ; preds = %bb14, %bb10
- %t2 = getelementptr inbounds i8, i8* %ptr.i8, i64 %t4 ; <i8*> [#uses=1]
- store i8 undef, i8* %t2
- %t6 = load float*, float** %ptr.float
- %t8 = bitcast float* %t6 to i8* ; <i8*> [#uses=1]
- %t9 = getelementptr inbounds i8, i8* %t8, i64 %t3 ; <i8*> [#uses=1]
- store i8 undef, i8* %t9
+ %t2 = getelementptr inbounds i8, ptr %ptr.i8, i64 %t4 ; <ptr> [#uses=1]
+ store i8 undef, ptr %t2
+ %t6 = load ptr, ptr %ptr.float
+ %t9 = getelementptr inbounds i8, ptr %t6, i64 %t3 ; <ptr> [#uses=1]
+ store i8 undef, ptr %t9
br label %bb14
}
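The deleted %t8 bitcast is the "ugly" step this test is named for: with typed pointers, byte-granular addressing of the loaded float* first required a cast to i8*. Under opaque pointers the cast carries no information and simply disappears, as the hunk above shows:

  ; Before: cast float* to i8* so a byte-indexed GEP type-checks.
  %t8 = bitcast float* %t6 to i8*
  %t9 = getelementptr inbounds i8, i8* %t8, i64 %t3

  ; After: every pointer is already ptr, so the GEP applies directly.
  %t9 = getelementptr inbounds i8, ptr %t6, i64 %t3

The check lines scale accordingly: the old element GEP of 4 floats reappears as a byte GEP of 16.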
diff --git a/llvm/test/Transforms/LoopVersioning/lcssa.ll b/llvm/test/Transforms/LoopVersioning/lcssa.ll
index 4b51c21257243..fe5dcdacd5e33 100644
--- a/llvm/test/Transforms/LoopVersioning/lcssa.ll
+++ b/llvm/test/Transforms/LoopVersioning/lcssa.ll
@@ -1,88 +1,88 @@
-; RUN: opt -opaque-pointers=0 -passes=loop-versioning -S < %s | FileCheck %s
+; RUN: opt -passes=loop-versioning -S < %s | FileCheck %s
target triple = "x86_64-unknown-linux-gnu"
-define void @fill(i8** %ls1.20, i8** %ls2.21, i8* %cse3.22) {
+define void @fill(ptr %ls1.20, ptr %ls2.21, ptr %cse3.22) {
; CHECK-LABEL: @fill(
; CHECK-NEXT: bb1.lver.check:
-; CHECK-NEXT: [[LS1_20_PROMOTED:%.*]] = load i8*, i8** [[LS1_20:%.*]], align 8
-; CHECK-NEXT: [[LS2_21_PROMOTED:%.*]] = load i8*, i8** [[LS2_21:%.*]], align 8
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[LS1_20_PROMOTED]], i64 -1
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, i8* [[LS1_20_PROMOTED]], i64 1
-; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, i8* [[LS2_21_PROMOTED]], i64 1
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult i8* [[SCEVGEP]], [[SCEVGEP2]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult i8* [[LS2_21_PROMOTED]], [[SCEVGEP1]]
+; CHECK-NEXT: [[LS1_20_PROMOTED:%.*]] = load ptr, ptr [[LS1_20:%.*]], align 8
+; CHECK-NEXT: [[LS2_21_PROMOTED:%.*]] = load ptr, ptr [[LS2_21:%.*]], align 8
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[LS1_20_PROMOTED]], i64 -1
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[LS1_20_PROMOTED]], i64 1
+; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[LS2_21_PROMOTED]], i64 1
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[LS2_21_PROMOTED]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %bb1.ph.lver.orig, label %bb1.ph
; CHECK: bb1.ph.lver.orig:
;
bb1.ph:
- %ls1.20.promoted = load i8*, i8** %ls1.20
- %ls2.21.promoted = load i8*, i8** %ls2.21
+ %ls1.20.promoted = load ptr, ptr %ls1.20
+ %ls2.21.promoted = load ptr, ptr %ls2.21
br label %bb1
bb1:
- %_tmp302 = phi i8* [ %ls2.21.promoted, %bb1.ph ], [ %_tmp30, %bb1 ]
- %_tmp281 = phi i8* [ %ls1.20.promoted, %bb1.ph ], [ %_tmp28, %bb1 ]
- %_tmp14 = getelementptr i8, i8* %_tmp281, i16 -1
- %_tmp15 = load i8, i8* %_tmp14
+ %_tmp302 = phi ptr [ %ls2.21.promoted, %bb1.ph ], [ %_tmp30, %bb1 ]
+ %_tmp281 = phi ptr [ %ls1.20.promoted, %bb1.ph ], [ %_tmp28, %bb1 ]
+ %_tmp14 = getelementptr i8, ptr %_tmp281, i16 -1
+ %_tmp15 = load i8, ptr %_tmp14
%add = add i8 %_tmp15, 1
- store i8 %add, i8* %_tmp281
- store i8 %add, i8* %_tmp302
- %_tmp28 = getelementptr i8, i8* %_tmp281, i16 1
- %_tmp30 = getelementptr i8, i8* %_tmp302, i16 1
+ store i8 %add, ptr %_tmp281
+ store i8 %add, ptr %_tmp302
+ %_tmp28 = getelementptr i8, ptr %_tmp281, i16 1
+ %_tmp30 = getelementptr i8, ptr %_tmp302, i16 1
br i1 false, label %bb1, label %bb3.loopexit
bb3.loopexit:
- %_tmp30.lcssa = phi i8* [ %_tmp30, %bb1 ]
+ %_tmp30.lcssa = phi ptr [ %_tmp30, %bb1 ]
%_tmp15.lcssa = phi i8 [ %_tmp15, %bb1 ]
- %_tmp28.lcssa = phi i8* [ %_tmp28, %bb1 ]
- store i8* %_tmp28.lcssa, i8** %ls1.20
- store i8 %_tmp15.lcssa, i8* %cse3.22
- store i8* %_tmp30.lcssa, i8** %ls2.21
+ %_tmp28.lcssa = phi ptr [ %_tmp28, %bb1 ]
+ store ptr %_tmp28.lcssa, ptr %ls1.20
+ store i8 %_tmp15.lcssa, ptr %cse3.22
+ store ptr %_tmp30.lcssa, ptr %ls2.21
br label %bb3
bb3:
ret void
}
-define void @fill_no_null_opt(i8** %ls1.20, i8** %ls2.21, i8* %cse3.22) #0 {
+define void @fill_no_null_opt(ptr %ls1.20, ptr %ls2.21, ptr %cse3.22) #0 {
; CHECK-LABEL: @fill_no_null_opt(
; CHECK-NEXT: bb1.lver.check:
-; CHECK-NEXT: [[LS1_20_PROMOTED:%.*]] = load i8*, i8** [[LS1_20:%.*]], align 8
-; CHECK-NEXT: [[LS2_21_PROMOTED:%.*]] = load i8*, i8** [[LS2_21:%.*]], align 8
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[LS1_20_PROMOTED]], i64 -1
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, i8* [[LS1_20_PROMOTED]], i64 1
-; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, i8* [[LS2_21_PROMOTED]], i64 1
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult i8* [[SCEVGEP]], [[SCEVGEP2]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult i8* [[LS2_21_PROMOTED]], [[SCEVGEP1]]
+; CHECK-NEXT: [[LS1_20_PROMOTED:%.*]] = load ptr, ptr [[LS1_20:%.*]], align 8
+; CHECK-NEXT: [[LS2_21_PROMOTED:%.*]] = load ptr, ptr [[LS2_21:%.*]], align 8
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[LS1_20_PROMOTED]], i64 -1
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[LS1_20_PROMOTED]], i64 1
+; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[LS2_21_PROMOTED]], i64 1
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[LS2_21_PROMOTED]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %bb1.ph.lver.orig, label %bb1.ph
; CHECK: bb1.ph.lver.orig:
;
bb1.ph:
- %ls1.20.promoted = load i8*, i8** %ls1.20
- %ls2.21.promoted = load i8*, i8** %ls2.21
+ %ls1.20.promoted = load ptr, ptr %ls1.20
+ %ls2.21.promoted = load ptr, ptr %ls2.21
br label %bb1
bb1:
- %_tmp302 = phi i8* [ %ls2.21.promoted, %bb1.ph ], [ %_tmp30, %bb1 ]
- %_tmp281 = phi i8* [ %ls1.20.promoted, %bb1.ph ], [ %_tmp28, %bb1 ]
- %_tmp14 = getelementptr i8, i8* %_tmp281, i16 -1
- %_tmp15 = load i8, i8* %_tmp14
+ %_tmp302 = phi ptr [ %ls2.21.promoted, %bb1.ph ], [ %_tmp30, %bb1 ]
+ %_tmp281 = phi ptr [ %ls1.20.promoted, %bb1.ph ], [ %_tmp28, %bb1 ]
+ %_tmp14 = getelementptr i8, ptr %_tmp281, i16 -1
+ %_tmp15 = load i8, ptr %_tmp14
%add = add i8 %_tmp15, 1
- store i8 %add, i8* %_tmp281
- store i8 %add, i8* %_tmp302
- %_tmp28 = getelementptr i8, i8* %_tmp281, i16 1
- %_tmp30 = getelementptr i8, i8* %_tmp302, i16 1
+ store i8 %add, ptr %_tmp281
+ store i8 %add, ptr %_tmp302
+ %_tmp28 = getelementptr i8, ptr %_tmp281, i16 1
+ %_tmp30 = getelementptr i8, ptr %_tmp302, i16 1
br i1 false, label %bb1, label %bb3.loopexit
bb3.loopexit:
- %_tmp30.lcssa = phi i8* [ %_tmp30, %bb1 ]
+ %_tmp30.lcssa = phi ptr [ %_tmp30, %bb1 ]
%_tmp15.lcssa = phi i8 [ %_tmp15, %bb1 ]
- %_tmp28.lcssa = phi i8* [ %_tmp28, %bb1 ]
- store i8* %_tmp28.lcssa, i8** %ls1.20
- store i8 %_tmp15.lcssa, i8* %cse3.22
- store i8* %_tmp30.lcssa, i8** %ls2.21
+ %_tmp28.lcssa = phi ptr [ %_tmp28, %bb1 ]
+ store ptr %_tmp28.lcssa, ptr %ls1.20
+ store i8 %_tmp15.lcssa, ptr %cse3.22
+ store ptr %_tmp30.lcssa, ptr %ls2.21
br label %bb3
bb3:
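Both functions in this file emit the same versioning preamble, and it survives the conversion with only the pointer types respelled: the loop is cloned, and control branches on whether the one-byte ranges reached through the two promoted pointers can overlap. The kernel of that check, sketched with simplified bound names:

  ; Two ranges conflict iff each one starts below the other's end.
  %bound0 = icmp ult ptr %lo1, %hi2
  %bound1 = icmp ult ptr %lo2, %hi1
  %found.conflict = and i1 %bound0, %bound1
  br i1 %found.conflict, label %bb1.ph.lver.orig, label %bb1.ph

The bounds are i8 GEPs of -1 and 1 on the promoted pointers, so this check needed no bitcasts even before; only the i8* and i8** spellings change.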
diff --git a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
index 655d6ef23cf48..38b64eec17d8d 100644
--- a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
+++ b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 -passes=loop-versioning -S < %s | FileCheck %s -check-prefix=LV
+; RUN: opt -passes=loop-versioning -S < %s | FileCheck %s -check-prefix=LV
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@@ -25,21 +25,20 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; We have added the nusw flag to turn this expression into the SCEV expression:
; i64 {0,+,2}<%for.body>
-define void @f1(i16* noalias %a,
+define void @f1(ptr noalias %a,
; LV-LABEL: @f1(
; LV-NEXT: for.body.lver.check:
-; LV-NEXT: [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
-; LV-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
-; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
-; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
-; LV-NEXT: [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
-; LV-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
-; LV-NEXT: [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
-; LV-NEXT: [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
-; LV-NEXT: [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
-; LV-NEXT: br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; LV-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
+; LV-NEXT: [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
+; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
+; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
+; LV-NEXT: [[TMP2:%.*]] = sub i64 0, [[MUL_RESULT]]
+; LV-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[MUL_RESULT]]
+; LV-NEXT: [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[A]]
+; LV-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
+; LV-NEXT: [[TMP6:%.*]] = or i1 [[TMP1]], [[TMP5]]
+; LV-NEXT: br i1 [[TMP6]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
@@ -47,12 +46,12 @@ define void @f1(i16* noalias %a,
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
-; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
-; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
-; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
-; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, ptr [[A]], i64 [[MUL_EXT_LVER_ORIG]]
+; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, ptr [[B:%.*]], i64 [[IND_LVER_ORIG]]
+; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
-; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: store i16 [[ADD_LVER_ORIG]], ptr [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
@@ -64,24 +63,24 @@ define void @f1(i16* noalias %a,
; LV-NEXT: [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
-; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
-; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
-; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
-; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
+; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, ptr [[A]], i64 [[MUL_EXT]]
+; LV-NEXT: [[LOADA:%.*]] = load i16, ptr [[ARRAYIDXA]], align 2
+; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, ptr [[B]], i64 [[IND]]
+; LV-NEXT: [[LOADB:%.*]] = load i16, ptr [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
-; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
+; LV-NEXT: store i16 [[ADD]], ptr [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
-; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT3:%.*]], label [[FOR_BODY]]
+; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
-; LV: for.end.loopexit3:
+; LV: for.end.loopexit2:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
- i16* noalias %b, i64 %N) {
+ ptr noalias %b, i64 %N) {
entry:
br label %for.body
@@ -92,15 +91,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
%mul_ext = zext i32 %mul to i64
- %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%inc1 = add i32 %ind1, 1
@@ -136,7 +135,7 @@ for.end: ; preds = %for.body
; We have added the nusw flag to turn this expression into the following SCEV:
; i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>
-define void @f2(i16* noalias %a,
+define void @f2(ptr noalias %a,
; LV-LABEL: @f2(
; LV-NEXT: for.body.lver.check:
; LV-NEXT: [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
@@ -146,25 +145,24 @@ define void @f2(i16* noalias %a,
; LV-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
-; LV-NEXT: [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
-; LV-NEXT: [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP1]]
-; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
-; LV-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; LV-NEXT: [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
-; LV-NEXT: [[TMP12:%.*]] = trunc i64 [[N]] to i31
-; LV-NEXT: [[TMP13:%.*]] = zext i31 [[TMP12]] to i64
-; LV-NEXT: [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 1
-; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP14]]
+; LV-NEXT: [[TMP3:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
+; LV-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], [[TMP1]]
+; LV-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
+; LV-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
+; LV-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; LV-NEXT: [[TMP8:%.*]] = trunc i64 [[N]] to i31
+; LV-NEXT: [[TMP9:%.*]] = zext i31 [[TMP8]] to i64
+; LV-NEXT: [[TMP10:%.*]] = shl nuw nsw i64 [[TMP9]], 2
+; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP10]]
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
-; LV-NEXT: [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
-; LV-NEXT: [[TMP15:%.*]] = sub i64 0, [[MUL_RESULT3]]
-; LV-NEXT: [[TMP17:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP15]]
-; LV-NEXT: [[TMP18:%.*]] = icmp ugt i8* [[TMP17]], [[SCEVGEP5]]
-; LV-NEXT: [[TMP21:%.*]] = or i1 [[TMP18]], [[MUL_OVERFLOW4]]
-; LV-NEXT: [[TMP22:%.*]] = or i1 [[TMP10]], [[TMP21]]
-; LV-NEXT: br i1 [[TMP22]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; LV-NEXT: [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
+; LV-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP11]]
+; LV-NEXT: [[TMP13:%.*]] = icmp ugt ptr [[TMP12]], [[SCEVGEP]]
+; LV-NEXT: [[TMP14:%.*]] = or i1 [[TMP13]], [[MUL_OVERFLOW4]]
+; LV-NEXT: [[TMP15:%.*]] = or i1 [[TMP7]], [[TMP14]]
+; LV-NEXT: br i1 [[TMP15]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
@@ -172,12 +170,12 @@ define void @f2(i16* noalias %a,
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
-; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
-; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
-; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
-; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, ptr [[A]], i64 [[MUL_EXT_LVER_ORIG]]
+; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, ptr [[B:%.*]], i64 [[IND_LVER_ORIG]]
+; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
-; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: store i16 [[ADD_LVER_ORIG]], ptr [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
@@ -189,24 +187,24 @@ define void @f2(i16* noalias %a,
; LV-NEXT: [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
-; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
-; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
-; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
-; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
+; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, ptr [[A]], i64 [[MUL_EXT]]
+; LV-NEXT: [[LOADA:%.*]] = load i16, ptr [[ARRAYIDXA]], align 2
+; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, ptr [[B]], i64 [[IND]]
+; LV-NEXT: [[LOADB:%.*]] = load i16, ptr [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
-; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
+; LV-NEXT: store i16 [[ADD]], ptr [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
-; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
+; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT5:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
-; LV: for.end.loopexit6:
+; LV: for.end.loopexit5:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
- i16* noalias %b, i64 %N) {
+ ptr noalias %b, i64 %N) {
entry:
%TruncN = trunc i64 %N to i32
br label %for.body
@@ -218,15 +216,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
%mul_ext = zext i32 %mul to i64
- %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%dec = sub i32 %ind1, 1
@@ -246,28 +244,27 @@ for.end: ; preds = %for.body
; We have added the nssw flag to turn this expression into the following SCEV:
; i64 {0,+,2}<%for.body>
-define void @f3(i16* noalias %a,
+define void @f3(ptr noalias %a,
; LV-LABEL: @f3(
; LV-NEXT: for.body.lver.check:
-; LV-NEXT: [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
-; LV-NEXT: [[TMP5:%.*]] = icmp slt i32 [[MUL_RESULT]], 0
-; LV-NEXT: [[TMP8:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
-; LV-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP8]], [[TMP7]]
+; LV-NEXT: [[TMP2:%.*]] = icmp slt i32 [[MUL_RESULT]], 0
+; LV-NEXT: [[TMP3:%.*]] = or i1 [[TMP2]], [[MUL_OVERFLOW]]
+; LV-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
+; LV-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
-; LV-NEXT: [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
-; LV-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
-; LV-NEXT: [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
-; LV-NEXT: [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
-; LV-NEXT: [[TMP18:%.*]] = or i1 [[TMP9]], [[TMP17]]
-; LV-NEXT: br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; LV-NEXT: [[TMP6:%.*]] = sub i64 0, [[MUL_RESULT3]]
+; LV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[MUL_RESULT3]]
+; LV-NEXT: [[TMP8:%.*]] = icmp ult ptr [[TMP7]], [[A]]
+; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW4]]
+; LV-NEXT: [[TMP10:%.*]] = or i1 [[TMP5]], [[TMP9]]
+; LV-NEXT: br i1 [[TMP10]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
@@ -275,12 +272,12 @@ define void @f3(i16* noalias %a,
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
-; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
-; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
-; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
-; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, ptr [[A]], i64 [[MUL_EXT_LVER_ORIG]]
+; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, ptr [[B:%.*]], i64 [[IND_LVER_ORIG]]
+; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
-; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: store i16 [[ADD_LVER_ORIG]], ptr [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
@@ -292,24 +289,24 @@ define void @f3(i16* noalias %a,
; LV-NEXT: [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
-; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
-; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
-; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
-; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
+; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, ptr [[A]], i64 [[MUL_EXT]]
+; LV-NEXT: [[LOADA:%.*]] = load i16, ptr [[ARRAYIDXA]], align 2
+; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, ptr [[B]], i64 [[IND]]
+; LV-NEXT: [[LOADB:%.*]] = load i16, ptr [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
-; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
+; LV-NEXT: store i16 [[ADD]], ptr [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
-; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
+; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT5:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
-; LV: for.end.loopexit6:
+; LV: for.end.loopexit5:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
- i16* noalias %b, i64 %N) {
+ ptr noalias %b, i64 %N) {
entry:
br label %for.body
@@ -320,15 +317,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
%mul_ext = sext i32 %mul to i64
- %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%inc1 = add i32 %ind1, 1
@@ -340,7 +337,7 @@ for.end: ; preds = %for.body
ret void
}
-define void @f4(i16* noalias %a,
+define void @f4(ptr noalias %a,
; LV-LABEL: @f4(
; LV-NEXT: for.body.lver.check:
; LV-NEXT: [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
@@ -350,23 +347,23 @@ define void @f4(i16* noalias %a,
; LV-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
-; LV-NEXT: [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
-; LV-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
-; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
-; LV-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; LV-NEXT: [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
-; LV-NEXT: [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
-; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
+; LV-NEXT: [[TMP3:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
+; LV-NEXT: [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP1]]
+; LV-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
+; LV-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
+; LV-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; LV-NEXT: [[TMP8:%.*]] = sext i32 [[TMP1]] to i64
+; LV-NEXT: [[TMP9:%.*]] = shl nsw i64 [[TMP8]], 1
+; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP9]]
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
-; LV-NEXT: [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
-; LV-NEXT: [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
-; LV-NEXT: [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
-; LV-NEXT: [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
-; LV-NEXT: [[TMP19:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
-; LV-NEXT: [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
-; LV-NEXT: br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; LV-NEXT: [[TMP10:%.*]] = sub i64 0, [[MUL_RESULT3]]
+; LV-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP10]]
+; LV-NEXT: [[TMP12:%.*]] = icmp ugt ptr [[TMP11]], [[SCEVGEP]]
+; LV-NEXT: [[TMP13:%.*]] = or i1 [[TMP12]], [[MUL_OVERFLOW4]]
+; LV-NEXT: [[TMP14:%.*]] = or i1 [[TMP7]], [[TMP13]]
+; LV-NEXT: br i1 [[TMP14]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
@@ -374,12 +371,12 @@ define void @f4(i16* noalias %a,
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
-; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
-; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
-; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
-; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, ptr [[A]], i64 [[MUL_EXT_LVER_ORIG]]
+; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, ptr [[B:%.*]], i64 [[IND_LVER_ORIG]]
+; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
-; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: store i16 [[ADD_LVER_ORIG]], ptr [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
@@ -391,24 +388,24 @@ define void @f4(i16* noalias %a,
; LV-NEXT: [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
-; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
-; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
-; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
-; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
+; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, ptr [[A]], i64 [[MUL_EXT]]
+; LV-NEXT: [[LOADA:%.*]] = load i16, ptr [[ARRAYIDXA]], align 2
+; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, ptr [[B]], i64 [[IND]]
+; LV-NEXT: [[LOADB:%.*]] = load i16, ptr [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
-; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
+; LV-NEXT: store i16 [[ADD]], ptr [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
-; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
+; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT5:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
-; LV: for.end.loopexit6:
+; LV: for.end.loopexit5:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
- i16* noalias %b, i64 %N) {
+ ptr noalias %b, i64 %N) {
entry:
%TruncN = trunc i64 %N to i32
br label %for.body
@@ -420,15 +417,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
%mul_ext = sext i32 %mul to i64
- %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%dec = sub i32 %ind1, 1
@@ -447,7 +444,7 @@ for.end: ; preds = %for.body
;
; We can still analyze this by adding the required no wrap SCEV predicates.
-define void @f5(i16* noalias %a,
+define void @f5(ptr noalias %a,
; LV-LABEL: @f5(
; LV-NEXT: for.body.lver.check:
; LV-NEXT: [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
@@ -457,35 +454,35 @@ define void @f5(i16* noalias %a,
; LV-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
-; LV-NEXT: [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
-; LV-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
-; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
-; LV-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; LV-NEXT: [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
-; LV-NEXT: [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
-; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
+; LV-NEXT: [[TMP3:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
+; LV-NEXT: [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP1]]
+; LV-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
+; LV-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
+; LV-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; LV-NEXT: [[TMP8:%.*]] = sext i32 [[TMP1]] to i64
+; LV-NEXT: [[TMP9:%.*]] = shl nsw i64 [[TMP8]], 1
+; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP9]]
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
-; LV-NEXT: [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
-; LV-NEXT: [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
-; LV-NEXT: [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
-; LV-NEXT: [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
-; LV-NEXT: [[TMP19:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
-; LV-NEXT: [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
-; LV-NEXT: br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; LV-NEXT: [[TMP10:%.*]] = sub i64 0, [[MUL_RESULT3]]
+; LV-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP10]]
+; LV-NEXT: [[TMP12:%.*]] = icmp ugt ptr [[TMP11]], [[SCEVGEP]]
+; LV-NEXT: [[TMP13:%.*]] = or i1 [[TMP12]], [[MUL_OVERFLOW4]]
+; LV-NEXT: [[TMP14:%.*]] = or i1 [[TMP7]], [[TMP13]]
+; LV-NEXT: br i1 [[TMP14]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
; LV-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
-; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL_LVER_ORIG]]
-; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
-; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
-; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[MUL_LVER_ORIG]]
+; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i64 [[IND_LVER_ORIG]]
+; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, ptr [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
-; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
+; LV-NEXT: store i16 [[ADD_LVER_ORIG]], ptr [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
@@ -496,24 +493,24 @@ define void @f5(i16* noalias %a,
; LV-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
-; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL]]
-; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
-; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 [[IND]]
-; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
+; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[MUL]]
+; LV-NEXT: [[LOADA:%.*]] = load i16, ptr [[ARRAYIDXA]], align 2
+; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i16, ptr [[B]], i64 [[IND]]
+; LV-NEXT: [[LOADB:%.*]] = load i16, ptr [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
-; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
+; LV-NEXT: store i16 [[ADD]], ptr [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
-; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
+; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT5:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
-; LV: for.end.loopexit6:
+; LV: for.end.loopexit5:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
- i16* noalias %b, i64 %N) {
+ ptr noalias %b, i64 %N) {
entry:
%TruncN = trunc i64 %N to i32
br label %for.body
@@ -524,15 +521,15 @@ for.body: ; preds = %for.body, %entry
%mul = mul i32 %ind1, 2
- %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul
- %loadA = load i16, i16* %arrayidxA, align 2
+ %arrayidxA = getelementptr inbounds i16, ptr %a, i32 %mul
+ %loadA = load i16, ptr %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
- %loadB = load i16, i16* %arrayidxB, align 2
+ %arrayidxB = getelementptr inbounds i16, ptr %b, i64 %ind
+ %loadB = load i16, ptr %arrayidxB, align 2
%add = mul i16 %loadA, %loadB
- store i16 %add, i16* %arrayidxA, align 2
+ store i16 %add, ptr %arrayidxA, align 2
%inc = add nuw nsw i64 %ind, 1
%dec = sub i32 %ind1, 1
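
(For readers skimming the LV lines in the wrapping-pointer-versioning.ll hunk above: the runtime check the SCEV expander emits follows a fixed idiom, and the opaque-pointer update merely folds the old typed i16 GEP plus bitcast into a single byte-based "gep i8", hence the new "shl nsw ..., 1" that scales the i16 index to a byte offset. Below is a standalone sketch of that wrap check with invented names; it is an illustration of the idiom, not a line from the test.)

; Hypothetical reduction of the overflow/wrap test above: the loop
; covers 4*%n bytes ending at %a, so versioning must prove that
; stepping %a backwards by that range does not wrap the address space.
declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64)

define i1 @needs_fallback(ptr %a, i64 %n) {
entry:
  ; Byte size of the full trip count, plus the multiplication overflow bit.
  %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 %n)
  %ofs = extractvalue { i64, i1 } %mul, 0
  %ovf = extractvalue { i64, i1 } %mul, 1
  ; Offset the base by -size; if the result compares greater than the
  ; base, the pointer arithmetic wrapped.
  %neg = sub i64 0, %ofs
  %end = getelementptr i8, ptr %a, i64 %neg
  %wrap = icmp ugt ptr %end, %a
  ; Either failure routes execution to the unversioned .lver.orig loop.
  %fail = or i1 %wrap, %ovf
  ret i1 %fail
}
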
diff --git a/llvm/test/Transforms/NewGVN/pr31613.ll b/llvm/test/Transforms/NewGVN/pr31613.ll
index c7ed829008612..943cdbc113dc4 100644
--- a/llvm/test/Transforms/NewGVN/pr31613.ll
+++ b/llvm/test/Transforms/NewGVN/pr31613.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 < %s -passes=newgvn -enable-store-refinement -S | FileCheck %s
+; RUN: opt < %s -passes=newgvn -enable-store-refinement -S | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
;; Both of these tests are tests of phi nodes that end up all equivalent to each other
@@ -69,43 +69,41 @@ bb18: ; preds = %bb4
%struct.a = type {}
%struct.b = type {}
-declare void @c.d.p(i64, i8*)
+declare void @c.d.p(i64, ptr)
-define void @e(i32 %a0, i32 %a1, %struct.a** %p2) {
+define void @e(i32 %a0, i32 %a1, ptr %p2) {
; CHECK-LABEL: @e(
-; CHECK-NEXT: [[F:%.*]] = alloca i32
-; CHECK-NEXT: store i32 [[A0:%.*]], i32* [[F]], align 4, !g !0
+; CHECK-NEXT: [[F:%.*]] = alloca i32, align 4
+; CHECK-NEXT: store i32 [[A0:%.*]], ptr [[F]], align 4, !g !0
; CHECK-NEXT: br label [[H:%.*]]
; CHECK: h:
-; CHECK-NEXT: call void @c.d.p(i64 8, i8* undef)
-; CHECK-NEXT: [[I:%.*]] = load i32, i32* [[F]], align 4
-; CHECK-NEXT: [[J:%.*]] = load i32, i32* null, align 4
+; CHECK-NEXT: call void @c.d.p(i64 8, ptr undef)
+; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[F]], align 4
+; CHECK-NEXT: [[J:%.*]] = load i32, ptr null, align 4
; CHECK-NEXT: [[K:%.*]] = icmp eq i32 [[I]], [[J]]
; CHECK-NEXT: br i1 [[K]], label [[L:%.*]], label [[Q:%.*]]
; CHECK: l:
; CHECK-NEXT: br label [[R:%.*]]
; CHECK: q:
-; CHECK-NEXT: [[M:%.*]] = load %struct.a*, %struct.a** null, align 8
; CHECK-NEXT: br label [[R]]
; CHECK: r:
; CHECK-NEXT: switch i32 undef, label [[N:%.*]] [
; CHECK-NEXT: i32 0, label [[S:%.*]]
; CHECK-NEXT: ]
; CHECK: s:
-; CHECK-NEXT: store i32 [[A1:%.*]], i32* [[F]], align 4, !g !0
+; CHECK-NEXT: store i32 [[A1:%.*]], ptr [[F]], align 4, !g !0
; CHECK-NEXT: br label [[H]]
; CHECK: n:
-; CHECK-NEXT: [[O:%.*]] = load %struct.a*, %struct.a** [[P2:%.*]], align 8
; CHECK-NEXT: ret void
;
%f = alloca i32
- store i32 %a0, i32* %f, !g !0
+ store i32 %a0, ptr %f, !g !0
br label %h
h: ; preds = %s, %0
- call void @c.d.p(i64 8, i8* undef)
- %i = load i32, i32* %f
- %j = load i32, i32* null
+ call void @c.d.p(i64 8, ptr undef)
+ %i = load i32, ptr %f
+ %j = load i32, ptr null
%k = icmp eq i32 %i, %j
br i1 %k, label %l, label %q
@@ -113,8 +111,7 @@ l: ; preds = %h
br label %r
q: ; preds = %h
- %m = load %struct.a*, %struct.a** null
- %1 = bitcast %struct.a* %m to %struct.b*
+ %m = load ptr, ptr null
br label %r
r: ; preds = %q, %l
@@ -123,12 +120,11 @@ r: ; preds = %q, %l
]
s: ; preds = %r
- store i32 %a1, i32* %f, !g !0
+ store i32 %a1, ptr %f, !g !0
br label %h
n: ; preds = %r
- %o = load %struct.a*, %struct.a** %p2
- %2 = bitcast %struct.a* %o to %struct.b*
+ %o = load ptr, ptr %p2
ret void
}
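
(A note on the pr31613.ll hunks: under opaque pointers every pointer has the one type ptr, so the %struct.a*-to-%struct.b* bitcasts are meaningless and are simply dropped from the input. That leaves the %m and %o loads without users, which is presumably why their autogenerated CHECK lines disappear as well: NewGVN can delete the now-dead loads. A minimal standalone sketch of that cleanup, with a hypothetical function name:)

; Typed pointers needed a cast to use %o at another pointer type:
;   %o = load %struct.a*, %struct.a** %p2
;   %2 = bitcast %struct.a* %o to %struct.b*
; With opaque pointers no cast exists, the load is trivially dead,
; and the pass is free to erase it (matching the deleted CHECK line).
define void @dead_load_sketch(ptr %p2) {
  %o = load ptr, ptr %p2
  ret void
}
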