[llvm] 538d355 - [LoopVersioning] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 20 08:24:06 PDT 2023


Author: Nikita Popov
Date: 2023-06-20T17:23:56+02:00
New Revision: 538d3552900cfb772b05afcd24b13cff1236f43f

URL: https://github.com/llvm/llvm-project/commit/538d3552900cfb772b05afcd24b13cff1236f43f
DIFF: https://github.com/llvm/llvm-project/commit/538d3552900cfb772b05afcd24b13cff1236f43f.diff

LOG: [LoopVersioning] Convert tests to opaque pointers (NFC)
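
For context, the change replaces the typed-pointer spellings in these tests with opaque pointers and drops the -opaque-pointers=0 override from the RUN lines. A minimal before/after sketch of the pattern, illustrative only and mirroring the hunks below:

    ; typed pointers (legacy)
    %t0 = load i16*, i16** @c, align 1
    store i16 %t2, i16* %t0, align 1

    ; opaque pointers
    %t0 = load ptr, ptr @c, align 1
    store i16 %t2, ptr %t0, align 1

A side effect visible in the regenerated CHECK lines is that SCEV expansion no longer needs bitcasts between pointer types, so the autogenerated assertions also pick up simpler getelementptr i8 offsets and renumbered value names.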

Added: 
    

Modified: 
    llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll
    llvm/test/Transforms/LoopVersioning/basic.ll
    llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll b/llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll
index 78e9697e583e7..d9050700001a8 100644
--- a/llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll
+++ b/llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 < %s -passes=loop-versioning -S -o - | FileCheck %s
+; RUN: opt < %s -passes=loop-versioning -S -o - | FileCheck %s
 
 ; This test case used to end like this:
 ;
@@ -18,27 +18,25 @@
 
 @a = dso_local global i16 0, align 1
 @b = dso_local global i16 0, align 1
-@c = dso_local global i16* null, align 1
+@c = dso_local global ptr null, align 1
 
 define void @f1() {
 ; CHECK-LABEL: define void @f1() {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[T0:%.*]] = load i16*, i16** @c, align 1
-; CHECK-NEXT:    [[T01:%.*]] = bitcast i16* [[T0]] to i8*
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[T0]], i64 1
-; CHECK-NEXT:    [[SCEVGEP2:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[T0:%.*]] = load ptr, ptr @c, align 1
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[T0]], i64 2
 ; CHECK-NEXT:    br label [[FOR_BODY_LVER_CHECK:%.*]]
 ; CHECK:       for.body.lver.check:
-; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[T01]], bitcast (i16* getelementptr inbounds (i16, i16* @b, i64 1) to i8*)
-; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* bitcast (i16* @b to i8*), [[SCEVGEP2]]
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[T0]], getelementptr inbounds (i16, ptr @b, i64 1)
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult ptr @b, [[SCEVGEP]]
 ; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
 ; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
 ; CHECK:       for.body.ph.lver.orig:
 ; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
 ; CHECK:       for.body.lver.orig:
 ; CHECK-NEXT:    [[T1_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
-; CHECK-NEXT:    [[T2_LVER_ORIG:%.*]] = load i16, i16* @b, align 1, !tbaa [[TBAA2:![0-9]+]]
-; CHECK-NEXT:    store i16 [[T2_LVER_ORIG]], i16* [[T0]], align 1, !tbaa [[TBAA2]]
+; CHECK-NEXT:    [[T2_LVER_ORIG:%.*]] = load i16, ptr @b, align 1, !tbaa [[TBAA2:![0-9]+]]
+; CHECK-NEXT:    store i16 [[T2_LVER_ORIG]], ptr [[T0]], align 1, !tbaa [[TBAA2]]
 ; CHECK-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[T1_LVER_ORIG]], 1
 ; CHECK-NEXT:    [[CMP_LVER_ORIG:%.*]] = icmp ult i64 [[INC_LVER_ORIG]], 3
 ; CHECK-NEXT:    br i1 [[CMP_LVER_ORIG]], label [[FOR_BODY_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]]
@@ -46,29 +44,29 @@ define void @f1() {
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[T1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[T2:%.*]] = load i16, i16* @b, align 1, !tbaa [[TBAA2]], !alias.scope !6
-; CHECK-NEXT:    store i16 [[T2]], i16* [[T0]], align 1, !tbaa [[TBAA2]], !alias.scope !9, !noalias !6
+; CHECK-NEXT:    [[T2:%.*]] = load i16, ptr @b, align 1, !tbaa [[TBAA2]], !alias.scope !6
+; CHECK-NEXT:    store i16 [[T2]], ptr [[T0]], align 1, !tbaa [[TBAA2]], !alias.scope !9, !noalias !6
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[T1]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[INC]], 3
-; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT3:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT1:%.*]]
 ; CHECK:       for.end.loopexit:
 ; CHECK-NEXT:    [[T2_LVER_PH:%.*]] = phi i16 [ [[T2_LVER_ORIG]], [[FOR_BODY_LVER_ORIG]] ]
 ; CHECK-NEXT:    br label [[FOR_END:%.*]]
-; CHECK:       for.end.loopexit3:
-; CHECK-NEXT:    [[T2_LVER_PH4:%.*]] = phi i16 [ [[T2]], [[FOR_BODY]] ]
+; CHECK:       for.end.loopexit1:
+; CHECK-NEXT:    [[T2_LVER_PH2:%.*]] = phi i16 [ [[T2]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    br label [[FOR_END]]
 ; CHECK:       for.end:
-; CHECK-NEXT:    [[T2_LVER:%.*]] = phi i16 [ [[T2_LVER_PH]], [[FOR_END_LOOPEXIT]] ], [ [[T2_LVER_PH4]], [[FOR_END_LOOPEXIT3]] ]
+; CHECK-NEXT:    [[T2_LVER:%.*]] = phi i16 [ [[T2_LVER_PH]], [[FOR_END_LOOPEXIT]] ], [ [[T2_LVER_PH2]], [[FOR_END_LOOPEXIT1]] ]
 ; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i16 [[T2_LVER]], 0
 ; CHECK-NEXT:    br i1 [[TOBOOL]], label [[FOR_COND_BACKEDGE:%.*]], label [[IF_THEN:%.*]]
 ; CHECK:       for.cond.backedge:
 ; CHECK-NEXT:    br label [[FOR_BODY_LVER_CHECK]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    store i16 [[T2_LVER]], i16* @a, align 1, !tbaa [[TBAA2]]
+; CHECK-NEXT:    store i16 [[T2_LVER]], ptr @a, align 1, !tbaa [[TBAA2]]
 ; CHECK-NEXT:    br label [[FOR_COND_BACKEDGE]]
 ;
 entry:
-  %t0 = load i16*, i16** @c, align 1
+  %t0 = load ptr, ptr @c, align 1
   br label %for.cond
 
 for.cond:                                         ; preds = %for.cond.backedge, %entry
@@ -76,8 +74,8 @@ for.cond:                                         ; preds = %for.cond.backedge,
 
 for.body:                                         ; preds = %for.cond, %for.body
   %t1 = phi i64 [ 0, %for.cond ], [ %inc, %for.body ]
-  %t2 = load i16, i16* @b, align 1, !tbaa !2
-  store i16 %t2, i16* %t0, align 1, !tbaa !2
+  %t2 = load i16, ptr @b, align 1, !tbaa !2
+  store i16 %t2, ptr %t0, align 1, !tbaa !2
   %inc = add nuw nsw i64 %t1, 1
   %cmp = icmp ult i64 %inc, 3
   br i1 %cmp, label %for.body, label %for.end
@@ -90,7 +88,7 @@ for.cond.backedge:                                ; preds = %for.end, %if.then
   br label %for.cond
 
 if.then:                                          ; preds = %for.end
-  store i16 %t2, i16* @a, align 1, !tbaa !2
+  store i16 %t2, ptr @a, align 1, !tbaa !2
   br label %for.cond.backedge
 }
 

diff --git a/llvm/test/Transforms/LoopVersioning/basic.ll b/llvm/test/Transforms/LoopVersioning/basic.ll
index 7f374f8ee726d..8e9685ad2d5ca 100644
--- a/llvm/test/Transforms/LoopVersioning/basic.ll
+++ b/llvm/test/Transforms/LoopVersioning/basic.ll
@@ -1,42 +1,36 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 -passes=loop-versioning -S < %s | FileCheck %s
+; RUN: opt -passes=loop-versioning -S < %s | FileCheck %s
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
 ; Version this loop with overlap checks between a, c and b, c.
 
-define void @f(i32* %a, i32* %b, i32* %c) {
+define void @f(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: define void @f
-; CHECK-SAME: (i32* [[A:%.*]], i32* [[B:%.*]], i32* [[C:%.*]]) {
+; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
 ; CHECK-NEXT:  for.body.lver.check:
-; CHECK-NEXT:    [[C1:%.*]] = bitcast i32* [[C]] to i8*
-; CHECK-NEXT:    [[A3:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    [[B6:%.*]] = bitcast i32* [[B]] to i8*
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[C]], i64 20
-; CHECK-NEXT:    [[SCEVGEP2:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
-; CHECK-NEXT:    [[SCEVGEP4:%.*]] = getelementptr i32, i32* [[A]], i64 20
-; CHECK-NEXT:    [[SCEVGEP45:%.*]] = bitcast i32* [[SCEVGEP4]] to i8*
-; CHECK-NEXT:    [[SCEVGEP7:%.*]] = getelementptr i32, i32* [[B]], i64 20
-; CHECK-NEXT:    [[SCEVGEP78:%.*]] = bitcast i32* [[SCEVGEP7]] to i8*
-; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[C1]], [[SCEVGEP45]]
-; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[A3]], [[SCEVGEP2]]
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[C]], i64 80
+; CHECK-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 80
+; CHECK-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[B]], i64 80
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[C]], [[SCEVGEP1]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
 ; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT:    [[BOUND09:%.*]] = icmp ult i8* [[C1]], [[SCEVGEP78]]
-; CHECK-NEXT:    [[BOUND110:%.*]] = icmp ult i8* [[B6]], [[SCEVGEP2]]
-; CHECK-NEXT:    [[FOUND_CONFLICT11:%.*]] = and i1 [[BOUND09]], [[BOUND110]]
-; CHECK-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT11]]
+; CHECK-NEXT:    [[BOUND03:%.*]] = icmp ult ptr [[C]], [[SCEVGEP2]]
+; CHECK-NEXT:    [[BOUND14:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
+; CHECK-NEXT:    [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
+; CHECK-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
 ; CHECK-NEXT:    br i1 [[CONFLICT_RDX]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
 ; CHECK:       for.body.ph.lver.orig:
 ; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
 ; CHECK:       for.body.lver.orig:
 ; CHECK-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
-; CHECK-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IND_LVER_ORIG]]
-; CHECK-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
-; CHECK-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IND_LVER_ORIG]]
-; CHECK-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
+; CHECK-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IND_LVER_ORIG]]
+; CHECK-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXA_LVER_ORIG]], align 4
+; CHECK-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IND_LVER_ORIG]]
+; CHECK-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i32, ptr [[ARRAYIDXB_LVER_ORIG]], align 4
 ; CHECK-NEXT:    [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
-; CHECK-NEXT:    [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[IND_LVER_ORIG]]
-; CHECK-NEXT:    store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
+; CHECK-NEXT:    [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IND_LVER_ORIG]]
+; CHECK-NEXT:    store i32 [[MULC_LVER_ORIG]], ptr [[ARRAYIDXC_LVER_ORIG]], align 4
 ; CHECK-NEXT:    [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
 ; CHECK-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], 20
 ; CHECK-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
@@ -44,19 +38,19 @@ define void @f(i32* %a, i32* %b, i32* %c) {
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IND]]
-; CHECK-NEXT:    [[LOADA:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4, !alias.scope !0
-; CHECK-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IND]]
-; CHECK-NEXT:    [[LOADB:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4, !alias.scope !3
+; CHECK-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IND]]
+; CHECK-NEXT:    [[LOADA:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4, !alias.scope !0
+; CHECK-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IND]]
+; CHECK-NEXT:    [[LOADB:%.*]] = load i32, ptr [[ARRAYIDXB]], align 4, !alias.scope !3
 ; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[LOADA]], [[LOADB]]
-; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[IND]]
-; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4, !alias.scope !5, !noalias !7
+; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IND]]
+; CHECK-NEXT:    store i32 [[MULC]], ptr [[ARRAYIDXC]], align 4, !alias.scope !5, !noalias !7
 ; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
-; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT12:%.*]], label [[FOR_BODY]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
 ; CHECK:       for.end.loopexit:
 ; CHECK-NEXT:    br label [[FOR_END:%.*]]
-; CHECK:       for.end.loopexit12:
+; CHECK:       for.end.loopexit6:
 ; CHECK-NEXT:    br label [[FOR_END]]
 ; CHECK:       for.end:
 ; CHECK-NEXT:    ret void
@@ -69,16 +63,16 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
 
-  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
-  %loadA = load i32, i32* %arrayidxA, align 4
+  %arrayidxA = getelementptr inbounds i32, ptr %a, i64 %ind
+  %loadA = load i32, ptr %arrayidxA, align 4
 
-  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
-  %loadB = load i32, i32* %arrayidxB, align 4
+  %arrayidxB = getelementptr inbounds i32, ptr %b, i64 %ind
+  %loadB = load i32, ptr %arrayidxB, align 4
 
   %mulC = mul i32 %loadA, %loadB
 
-  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
-  store i32 %mulC, i32* %arrayidxC, align 4
+  %arrayidxC = getelementptr inbounds i32, ptr %c, i64 %ind
+  store i32 %mulC, ptr %arrayidxC, align 4
 
   %add = add nuw nsw i64 %ind, 1
   %exitcond = icmp eq i64 %add, 20

diff --git a/llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll b/llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll
index 8185e45cf6b36..70c12a2d62ec3 100644
--- a/llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll
+++ b/llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -opaque-pointers=0 -aa-pipeline=basic-aa -passes=loop-versioning -S %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes=loop-versioning -S %s | FileCheck %s
 
 %struct.foo = type { [32000 x double], [32000 x double] }
 
@@ -10,31 +10,29 @@ define void @bound_check_partially_known_1(i32 %N) {
 ; CHECK-SAME: (i32 [[N:%.*]]) {
 ; CHECK-NEXT:  loop.lver.check:
 ; CHECK-NEXT:    [[N_EXT:%.*]] = zext i32 [[N]] to i64
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr [[STRUCT_FOO:%.*]], %struct.foo* @global, i64 0, i32 0, i64 [[N_EXT]]
-; CHECK-NEXT:    [[SCEVGEP1:%.*]] = bitcast double* [[SCEVGEP]] to i8*
-; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i64 [[N_EXT]], 1
-; CHECK-NEXT:    [[SCEVGEP2:%.*]] = getelementptr [[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 0, i64 [[TMP0]]
-; CHECK-NEXT:    [[SCEVGEP23:%.*]] = bitcast double* [[SCEVGEP2]] to i8*
-; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[N_EXT]], 32000
-; CHECK-NEXT:    [[SCEVGEP4:%.*]] = getelementptr [[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[SCEVGEP45:%.*]] = bitcast double* [[SCEVGEP4]] to i8*
-; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* bitcast (%struct.foo* @global to i8*), [[SCEVGEP23]]
-; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[SCEVGEP1]], [[SCEVGEP45]]
-; CHECK-NEXT:    [[BOUND16:%.*]] = icmp ult i8* bitcast (double* getelementptr inbounds ([[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 1, i64 0) to i8*), [[SCEVGEP23]]
-; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND16]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i64 [[N_EXT]], 3
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr @global, i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[N_EXT]], 4
+; CHECK-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr @global, i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = add nuw nsw i64 [[TMP0]], 256000
+; CHECK-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i8, ptr @global, i64 [[TMP2]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult ptr @global, [[SCEVGEP1]]
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
+; CHECK-NEXT:    [[BOUND13:%.*]] = icmp ult ptr getelementptr inbounds ([[STRUCT_FOO:%.*]], ptr @global, i64 0, i32 1, i64 0), [[SCEVGEP1]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND13]]
 ; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[LOOP_PH_LVER_ORIG:%.*]], label [[LOOP_PH:%.*]]
 ; CHECK:       loop.ph.lver.orig:
 ; CHECK-NEXT:    br label [[LOOP_LVER_ORIG:%.*]]
 ; CHECK:       loop.lver.orig:
 ; CHECK-NEXT:    [[IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[LOOP_PH_LVER_ORIG]] ], [ [[IV_NEXT_LVER_ORIG:%.*]], [[LOOP_LVER_ORIG]] ]
-; CHECK-NEXT:    [[GEP_0_IV_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 0, i64 [[IV_LVER_ORIG]]
-; CHECK-NEXT:    [[L_0_LVER_ORIG:%.*]] = load double, double* [[GEP_0_IV_LVER_ORIG]], align 8
-; CHECK-NEXT:    [[GEP_1_IV_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 1, i64 [[IV_LVER_ORIG]]
-; CHECK-NEXT:    [[L_1_LVER_ORIG:%.*]] = load double, double* [[GEP_1_IV_LVER_ORIG]], align 8
+; CHECK-NEXT:    [[GEP_0_IV_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 0, i64 [[IV_LVER_ORIG]]
+; CHECK-NEXT:    [[L_0_LVER_ORIG:%.*]] = load double, ptr [[GEP_0_IV_LVER_ORIG]], align 8
+; CHECK-NEXT:    [[GEP_1_IV_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 1, i64 [[IV_LVER_ORIG]]
+; CHECK-NEXT:    [[L_1_LVER_ORIG:%.*]] = load double, ptr [[GEP_1_IV_LVER_ORIG]], align 8
 ; CHECK-NEXT:    [[ADD_LVER_ORIG:%.*]] = fadd double [[L_0_LVER_ORIG]], [[L_1_LVER_ORIG]]
 ; CHECK-NEXT:    [[IV_N_LVER_ORIG:%.*]] = add nuw nsw i64 [[IV_LVER_ORIG]], [[N_EXT]]
-; CHECK-NEXT:    [[GEP_0_IV_N_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 0, i64 [[IV_N_LVER_ORIG]]
-; CHECK-NEXT:    store double [[ADD_LVER_ORIG]], double* [[GEP_0_IV_N_LVER_ORIG]], align 8
+; CHECK-NEXT:    [[GEP_0_IV_N_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 0, i64 [[IV_N_LVER_ORIG]]
+; CHECK-NEXT:    store double [[ADD_LVER_ORIG]], ptr [[GEP_0_IV_N_LVER_ORIG]], align 8
 ; CHECK-NEXT:    [[IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[IV_LVER_ORIG]], 1
 ; CHECK-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[IV_NEXT_LVER_ORIG]], [[N_EXT]]
 ; CHECK-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[EXIT_LOOPEXIT:%.*]], label [[LOOP_LVER_ORIG]]
@@ -42,20 +40,20 @@ define void @bound_check_partially_known_1(i32 %N) {
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[LOOP_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT:    [[GEP_0_IV:%.*]] = getelementptr inbounds [[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 0, i64 [[IV]]
-; CHECK-NEXT:    [[L_0:%.*]] = load double, double* [[GEP_0_IV]], align 8, !alias.scope !0
-; CHECK-NEXT:    [[GEP_1_IV:%.*]] = getelementptr inbounds [[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 1, i64 [[IV]]
-; CHECK-NEXT:    [[L_1:%.*]] = load double, double* [[GEP_1_IV]], align 8, !alias.scope !3
+; CHECK-NEXT:    [[GEP_0_IV:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 0, i64 [[IV]]
+; CHECK-NEXT:    [[L_0:%.*]] = load double, ptr [[GEP_0_IV]], align 8, !alias.scope !0
+; CHECK-NEXT:    [[GEP_1_IV:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 1, i64 [[IV]]
+; CHECK-NEXT:    [[L_1:%.*]] = load double, ptr [[GEP_1_IV]], align 8, !alias.scope !3
 ; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[L_0]], [[L_1]]
 ; CHECK-NEXT:    [[IV_N:%.*]] = add nuw nsw i64 [[IV]], [[N_EXT]]
-; CHECK-NEXT:    [[GEP_0_IV_N:%.*]] = getelementptr inbounds [[STRUCT_FOO]], %struct.foo* @global, i64 0, i32 0, i64 [[IV_N]]
-; CHECK-NEXT:    store double [[ADD]], double* [[GEP_0_IV_N]], align 8, !alias.scope !5, !noalias !7
+; CHECK-NEXT:    [[GEP_0_IV_N:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 0, i64 [[IV_N]]
+; CHECK-NEXT:    store double [[ADD]], ptr [[GEP_0_IV_N]], align 8, !alias.scope !5, !noalias !7
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_EXT]]
-; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT_LOOPEXIT7:%.*]], label [[LOOP]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT_LOOPEXIT4:%.*]], label [[LOOP]]
 ; CHECK:       exit.loopexit:
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
-; CHECK:       exit.loopexit7:
+; CHECK:       exit.loopexit4:
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
@@ -66,14 +64,14 @@ entry:
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
-  %gep.0.iv = getelementptr inbounds %struct.foo, %struct.foo* @global, i64 0, i32 0, i64 %iv
-  %l.0 = load double, double* %gep.0.iv, align 8
-  %gep.1.iv = getelementptr inbounds %struct.foo, %struct.foo* @global, i64 0, i32 1, i64 %iv
-  %l.1 = load double, double* %gep.1.iv, align 8
+  %gep.0.iv = getelementptr inbounds %struct.foo, ptr @global, i64 0, i32 0, i64 %iv
+  %l.0 = load double, ptr %gep.0.iv, align 8
+  %gep.1.iv = getelementptr inbounds %struct.foo, ptr @global, i64 0, i32 1, i64 %iv
+  %l.1 = load double, ptr %gep.1.iv, align 8
   %add = fadd double %l.0, %l.1
   %iv.N = add nuw nsw i64 %iv, %N.ext
-  %gep.0.iv.N = getelementptr inbounds %struct.foo, %struct.foo* @global, i64 0, i32 0, i64 %iv.N
-  store double %add, double* %gep.0.iv.N, align 8
+  %gep.0.iv.N = getelementptr inbounds %struct.foo, ptr @global, i64 0, i32 0, i64 %iv.N
+  store double %add, ptr %gep.0.iv.N, align 8
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond = icmp eq i64 %iv.next, %N.ext
   br i1 %exitcond, label %exit, label %loop


        

