[llvm-branch-commits] [llvm-branch] r133703 - /llvm/branches/type-system-rewrite/test/Other/constant-fold-gep.ll

Chris Lattner sabre at nondot.org
Wed Jun 22 23:36:03 PDT 2011


Author: lattner
Date: Thu Jun 23 01:36:03 2011
New Revision: 133703

URL: http://llvm.org/viewvc/llvm-project?rev=133703&view=rev
Log:
Update this test to include the explicit types that the AsmPrinter now preserves.
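For reference, the @b case from the diff below illustrates the change. Previously the AsmPrinter numbered literal struct types and referred to them by name, so the PLAIN/OPT checks had to match the numbered form; now the literal type is spelled out inline at each use:

    ; Old output: a numbered type definition plus a reference to it.
    %0 = type { i1, double }
    @b = constant i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)

    ; New output: the literal struct type is printed explicitly at the use.
    @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)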

Modified:
    llvm/branches/type-system-rewrite/test/Other/constant-fold-gep.ll

Modified: llvm/branches/type-system-rewrite/test/Other/constant-fold-gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/test/Other/constant-fold-gep.ll?rev=133703&r1=133702&r2=133703&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/test/Other/constant-fold-gep.ll (original)
+++ llvm/branches/type-system-rewrite/test/Other/constant-fold-gep.ll Thu Jun 23 01:36:03 2011
@@ -13,20 +13,6 @@
 ; "SCEV" - ScalarEvolution but no targetdata.
 ; RUN: opt -analyze -scalar-evolution < %s | FileCheck --check-prefix=SCEV %s
 
-; ScalarEvolution with targetdata isn't interesting on these testcases
-; because ScalarEvolution doesn't attempt to duplicate all of instcombine's
-; and the constant folders' folding.
-
-; PLAIN: %0 = type { i1, double }
-; PLAIN: %1 = type { double, float, double, double }
-; PLAIN: %2 = type { i1, i1* }
-; PLAIN: %3 = type { i64, i64 }
-; PLAIN: %4 = type { i32, i32 }
-; OPT: %0 = type { i1, double }
-; OPT: %1 = type { double, float, double, double }
-; OPT: %2 = type { i1, i1* }
-; OPT: %3 = type { i64, i64 }
-; OPT: %4 = type { i32, i32 }
 
 ; The automatic constant folder in opt does not have targetdata access, so
 ; it can't fold gep arithmetic, in general. However, the constant folder run
@@ -63,23 +49,23 @@
 ; target-dependent folder should fold these down to constants.
 
 ; PLAIN: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
-; PLAIN: @b = constant i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; PLAIN: @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
 ; PLAIN: @c = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
 ; PLAIN: @d = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
-; PLAIN: @e = constant i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64)
+; PLAIN: @e = constant i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
 ; PLAIN: @f = constant i64 1
-; PLAIN: @g = constant i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; PLAIN: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
 ; PLAIN: @h = constant i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
-; PLAIN: @i = constant i64 ptrtoint (i1** getelementptr (%2* null, i64 0, i32 1) to i64)
+; PLAIN: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
 ; OPT: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
-; OPT: @b = constant i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; OPT: @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
 ; OPT: @c = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
 ; OPT: @d = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
-; OPT: @e = constant i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64)
+; OPT: @e = constant i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
 ; OPT: @f = constant i64 1
-; OPT: @g = constant i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; OPT: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
 ; OPT: @h = constant i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
-; OPT: @i = constant i64 ptrtoint (i1** getelementptr (%2* null, i64 0, i32 1) to i64)
+; OPT: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
 ; TO: @a = constant i64 18480
 ; TO: @b = constant i64 8
 ; TO: @c = constant i64 16
@@ -103,10 +89,10 @@
 ; The target-dependent folder should cast GEP indices to integer-sized pointers.
 
 ; PLAIN: @M = constant i64* getelementptr (i64* null, i32 1)
-; PLAIN: @N = constant i64* getelementptr (%3* null, i32 0, i32 1)
+; PLAIN: @N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
 ; PLAIN: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
 ; OPT: @M = constant i64* getelementptr (i64* null, i32 1)
-; OPT: @N = constant i64* getelementptr (%3* null, i32 0, i32 1)
+; OPT: @N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
 ; OPT: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
 ; TO: @M = constant i64* inttoptr (i64 8 to i64*)
 ; TO: @N = constant i64* inttoptr (i64 8 to i64*)
@@ -119,9 +105,9 @@
 ; Fold GEP of a GEP. Theoretically some of these cases could be folded
 ; without using targetdata, however that's not implemented yet.
 
-; PLAIN: @Z = global i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x %4]* @ext, i64 0, i64 1, i32 0), i64 1)
-; OPT: @Z = global i32* getelementptr (i32* getelementptr inbounds ([3 x %4]* @ext, i64 0, i64 1, i32 0), i64 1)
-; TO: @Z = global i32* getelementptr inbounds ([3 x %0]* @ext, i64 0, i64 1, i32 1)
+; PLAIN: @Z = global i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
+; OPT: @Z = global i32* getelementptr (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
+; TO: @Z = global i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
 
 @ext = external global [3 x { i32, i32 }]
 @Z = global i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
@@ -236,7 +222,7 @@
 ; PLAIN:   ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fb() nounwind {
-; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64) to i64
+; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
 ; PLAIN:   ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fc() nounwind {
@@ -248,7 +234,7 @@
 ; PLAIN:   ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fe() nounwind {
-; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64) to i64
+; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
 ; PLAIN:   ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @ff() nounwind {
@@ -256,7 +242,7 @@
 ; PLAIN:   ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fg() nounwind {
-; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64) to i64
+; PLAIN:   %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
 ; PLAIN:   ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fh() nounwind {
@@ -264,14 +250,14 @@
 ; PLAIN:   ret i64 %t
 ; PLAIN: }
 ; PLAIN: define i64 @fi() nounwind {
-; PLAIN:   %t = bitcast i64 ptrtoint (i1** getelementptr (%2* null, i64 0, i32 1) to i64) to i64
+; PLAIN:   %t = bitcast i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64) to i64
 ; PLAIN:   ret i64 %t
 ; PLAIN: }
 ; OPT: define i64 @fa() nounwind {
 ; OPT:   ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
 ; OPT: }
 ; OPT: define i64 @fb() nounwind {
-; OPT:   ret i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; OPT:   ret i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
 ; OPT: }
 ; OPT: define i64 @fc() nounwind {
 ; OPT:   ret i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
@@ -280,19 +266,19 @@
 ; OPT:   ret i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
 ; OPT: }
 ; OPT: define i64 @fe() nounwind {
-; OPT:   ret i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64)
+; OPT:   ret i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
 ; OPT: }
 ; OPT: define i64 @ff() nounwind {
 ; OPT:   ret i64 1
 ; OPT: }
 ; OPT: define i64 @fg() nounwind {
-; OPT:   ret i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; OPT:   ret i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
 ; OPT: }
 ; OPT: define i64 @fh() nounwind {
 ; OPT:   ret i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
 ; OPT: }
 ; OPT: define i64 @fi() nounwind {
-; OPT:   ret i64 ptrtoint (i1** getelementptr (%2* null, i64 0, i32 1) to i64)
+; OPT:   ret i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
 ; OPT: }
 ; TO: define i64 @fa() nounwind {
 ; TO:   ret i64 18480
@@ -325,7 +311,7 @@
 ; SCEV:   %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64 
 ; SCEV:   -->  (2310 * sizeof(double))
 ; SCEV: Classifying expressions for: @fb
-; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64) to i64 
+; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64 
 ; SCEV:   -->  alignof(double)
 ; SCEV: Classifying expressions for: @fc
 ; SCEV:   %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64 
@@ -334,19 +320,19 @@
 ; SCEV:   %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64 
 ; SCEV:   -->  (11 * sizeof(double))
 ; SCEV: Classifying expressions for: @fe
-; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64) to i64 
+; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64 
 ; SCEV:   -->  offsetof({ double, float, double, double }, 2)
 ; SCEV: Classifying expressions for: @ff
 ; SCEV:   %t = bitcast i64 1 to i64 
 ; SCEV:   -->  1
 ; SCEV: Classifying expressions for: @fg
-; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64) to i64
+; SCEV:   %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
 ; SCEV:   -->  alignof(double)
 ; SCEV: Classifying expressions for: @fh
 ; SCEV:   %t = bitcast i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64) to i64
 ; SCEV:   -->  sizeof(i1*)
 ; SCEV: Classifying expressions for: @fi
-; SCEV:   %t = bitcast i64 ptrtoint (i1** getelementptr (%2* null, i64 0, i32 1) to i64) to i64
+; SCEV:   %t = bitcast i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64) to i64
 ; SCEV:   -->  alignof(i1*)
 
 define i64 @fa() nounwind {
@@ -391,7 +377,7 @@
 ; PLAIN:   ret i64* %t
 ; PLAIN: }
 ; PLAIN: define i64* @fN() nounwind {
-; PLAIN:   %t = bitcast i64* getelementptr (%3* null, i32 0, i32 1) to i64*
+; PLAIN:   %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
 ; PLAIN:   ret i64* %t
 ; PLAIN: }
 ; PLAIN: define i64* @fO() nounwind {
@@ -402,7 +388,7 @@
 ; OPT:   ret i64* getelementptr (i64* null, i32 1)
 ; OPT: }
 ; OPT: define i64* @fN() nounwind {
-; OPT:   ret i64* getelementptr (%3* null, i32 0, i32 1)
+; OPT:   ret i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
 ; OPT: }
 ; OPT: define i64* @fO() nounwind {
 ; OPT:   ret i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
@@ -420,7 +406,7 @@
 ; SCEV:   %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* 
 ; SCEV:   -->  sizeof(i64)
 ; SCEV: Classifying expressions for: @fN
-; SCEV:   %t = bitcast i64* getelementptr (%3* null, i32 0, i32 1) to i64* 
+; SCEV:   %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64* 
 ; SCEV:   -->  sizeof(i64)
 ; SCEV: Classifying expressions for: @fO
 ; SCEV:   %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64* 
@@ -440,17 +426,17 @@
 }
 
 ; PLAIN: define i32* @fZ() nounwind {
-; PLAIN:   %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x %4]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
+; PLAIN:   %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
 ; PLAIN:   ret i32* %t
 ; PLAIN: }
 ; OPT: define i32* @fZ() nounwind {
-; OPT:   ret i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x %4]* @ext, i64 0, i64 1, i32 0), i64 1)
+; OPT:   ret i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
 ; OPT: }
 ; TO: define i32* @fZ() nounwind {
-; TO:   ret i32* getelementptr inbounds ([3 x %0]* @ext, i64 0, i64 1, i32 1)
+; TO:   ret i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
 ; TO: }
 ; SCEV: Classifying expressions for: @fZ
-; SCEV:   %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x %4]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
+; SCEV:   %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
 ; SCEV:   -->  ((3 * sizeof(i32)) + @ext)
 
 define i32* @fZ() nounwind {
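A short aside on why the TO lines fold all the way to plain integers: with targetdata available, the target-dependent folder can compute the actual struct layout. Assuming the usual 64-bit layout used by this test (8-byte doubles with 8-byte alignment), the sketch below shows where the @b result comes from; the same sizeof(double) = 8 accounts for @a = 2310 * 8 = 18480, @c = 2 * 8 = 16, and @d = 11 * 8 = 88.

    ; Layout of { i1, double } under the test's 64-bit targetdata (assumed):
    ;   field 0 (i1)     -> offset 0
    ;   padding          -> bytes 1..7, so the double is 8-byte aligned
    ;   field 1 (double) -> offset 8
    ; Hence ptrtoint of a gep to field 1 off null folds to i64 8,
    ; matching "TO: @b = constant i64 8" in the diff above.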

More information about the llvm-branch-commits mailing list