[llvm-commits] CVS: llvm/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll 2007-01-14-FcmpSelf.ll 2007-01-18-VectorInfLoop.ll IntPtrCast.ll add.ll cast-malloc.ll sub.ll vec_shuffle.ll xor.ll zeroext-and-reduce.ll
Reid Spencer
reid at x10sys.com
Fri Jan 26 00:26:11 PST 2007
Changes in directory llvm/test/Transforms/InstCombine:
2007-01-13-ExtCompareMiscompile.ll updated: 1.1 -> 1.2
2007-01-14-FcmpSelf.ll updated: 1.1 -> 1.2
2007-01-18-VectorInfLoop.ll updated: 1.1 -> 1.2
IntPtrCast.ll updated: 1.3 -> 1.4
add.ll updated: 1.34 -> 1.35
cast-malloc.ll updated: 1.2 -> 1.3
sub.ll updated: 1.25 -> 1.26
vec_shuffle.ll updated: 1.7 -> 1.8
xor.ll updated: 1.19 -> 1.20
zeroext-and-reduce.ll updated: 1.4 -> 1.5
---
Log message:
For PR761: http://llvm.org/PR761 :
Remove "target endian/pointersize" or add "target datalayout" to make
the test parse properly, or set the datalayout because the defaults changed.
For PR645: http://llvm.org/PR645 :
Make global names use the @ prefix.
For llvm-upgrade changes:
Fix test cases or completely remove use of llvm-upgrade for test cases
that cannot survive the new renaming or upgrade capabilities.
---
Diffs of the changes: (+30 -24)
2007-01-13-ExtCompareMiscompile.ll | 2 +-
2007-01-14-FcmpSelf.ll | 4 ++--
2007-01-18-VectorInfLoop.ll | 2 +-
IntPtrCast.ll | 1 +
add.ll | 8 ++++----
cast-malloc.ll | 7 ++++++-
sub.ll | 2 +-
vec_shuffle.ll | 14 +++++++-------
xor.ll | 12 ++++++------
zeroext-and-reduce.ll | 2 +-
10 files changed, 30 insertions(+), 24 deletions(-)
Index: llvm/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll
diff -u llvm/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll:1.1 llvm/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll:1.2
--- llvm/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll:1.1 Sat Jan 13 17:11:45 2007
+++ llvm/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll Fri Jan 26 02:25:06 2007
@@ -1,7 +1,7 @@
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep zext
; PR1107
-define i1 %test(i8 %A, i8 %B) {
+define i1 @test(i8 %A, i8 %B) {
%a = zext i8 %A to i32
%b = zext i8 %B to i32
%c = icmp sgt i32 %a, %b
Index: llvm/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll
diff -u llvm/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll:1.1 llvm/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll:1.2
--- llvm/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll:1.1 Sun Jan 14 13:40:48 2007
+++ llvm/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll Fri Jan 26 02:25:06 2007
@@ -1,6 +1,6 @@
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep 'fcmp uno.*0.0'
; PR1111
-define i1 %test(double %X) {
-%tmp = fcmp une double %X, %X
+define i1 @test(double %X) {
+ %tmp = fcmp une double %X, %X
ret i1 %tmp
}
Index: llvm/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll
diff -u llvm/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll:1.1 llvm/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll:1.2
--- llvm/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll:1.1 Thu Jan 18 16:16:03 2007
+++ llvm/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll Fri Jan 26 02:25:06 2007
@@ -1,6 +1,6 @@
; RUN: llvm-as < %s | opt -instcombine -disable-output
-define <4 x i32> %test(<4 x i32> %A) {
+define <4 x i32> @test(<4 x i32> %A) {
%B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
%C = and <4 x i32> %B, < i32 -1, i32 -1, i32 -1, i32 -1 >
ret <4 x i32> %C
Index: llvm/test/Transforms/InstCombine/IntPtrCast.ll
diff -u llvm/test/Transforms/InstCombine/IntPtrCast.ll:1.3 llvm/test/Transforms/InstCombine/IntPtrCast.ll:1.4
--- llvm/test/Transforms/InstCombine/IntPtrCast.ll:1.3 Fri Dec 1 22:23:09 2006
+++ llvm/test/Transforms/InstCombine/IntPtrCast.ll Fri Jan 26 02:25:06 2007
@@ -1,4 +1,5 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | notcast
+target endian = little
target pointersize = 32
int *%test(int *%P) {
Index: llvm/test/Transforms/InstCombine/add.ll
diff -u llvm/test/Transforms/InstCombine/add.ll:1.34 llvm/test/Transforms/InstCombine/add.ll:1.35
--- llvm/test/Transforms/InstCombine/add.ll:1.34 Thu Jan 4 21:03:27 2007
+++ llvm/test/Transforms/InstCombine/add.ll Fri Jan 26 02:25:06 2007
@@ -241,9 +241,9 @@
ret ubyte %C
}
-i32 %test34(i32 %a) { ;; -> -1
- %tmpnot = xor i32 %a, -1
- %tmp2 = add i32 %tmpnot, %a
- ret i32 %tmp2
+int %test34(int %a) { ;; -> -1
+ %tmpnot = xor int %a, -1
+ %tmp2 = add int %tmpnot, %a
+ ret int %tmp2
}
Index: llvm/test/Transforms/InstCombine/cast-malloc.ll
diff -u llvm/test/Transforms/InstCombine/cast-malloc.ll:1.2 llvm/test/Transforms/InstCombine/cast-malloc.ll:1.3
--- llvm/test/Transforms/InstCombine/cast-malloc.ll:1.2 Fri Dec 1 22:23:09 2006
+++ llvm/test/Transforms/InstCombine/cast-malloc.ll Fri Jan 26 02:25:06 2007
@@ -1,6 +1,11 @@
; test that casted mallocs get converted to malloc of the right type
-; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep bitcast
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep bitcast
+; The target datalayout is important for this test case. We have to tell
+; instcombine that the ABI alignment for a long is 4-bytes, not 8, otherwise
+; it won't do the transform.
+target datalayout = "e-l:32:64"
int* %test(uint %size) {
%X = malloc long, uint %size
%ret = bitcast long* %X to int*
Index: llvm/test/Transforms/InstCombine/sub.ll
diff -u llvm/test/Transforms/InstCombine/sub.ll:1.25 llvm/test/Transforms/InstCombine/sub.ll:1.26
--- llvm/test/Transforms/InstCombine/sub.ll:1.25 Mon Jan 1 23:55:05 2007
+++ llvm/test/Transforms/InstCombine/sub.ll Fri Jan 26 02:25:06 2007
@@ -2,7 +2,7 @@
;
; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
-; RUN: grep -v 'sub i32 %Cok.s, %Bok.s' | not grep sub
+; RUN: grep -v 'sub i32 %Cok, %Bok' | not grep sub
implementation
Index: llvm/test/Transforms/InstCombine/vec_shuffle.ll
diff -u llvm/test/Transforms/InstCombine/vec_shuffle.ll:1.7 llvm/test/Transforms/InstCombine/vec_shuffle.ll:1.8
--- llvm/test/Transforms/InstCombine/vec_shuffle.ll:1.7 Fri Jan 5 01:35:24 2007
+++ llvm/test/Transforms/InstCombine/vec_shuffle.ll Fri Jan 26 02:25:06 2007
@@ -5,36 +5,36 @@
implementation
-define %T %test1(%T %v1) {
+define %T @test1(%T %v1) {
%v2 = shufflevector %T %v1, %T undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret %T %v2
}
-define %T %test2(%T %v1) {
+define %T @test2(%T %v1) {
%v2 = shufflevector %T %v1, %T %v1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret %T %v2
}
-define float %test3(%T %A, %T %B, float %f) {
+define float @test3(%T %A, %T %B, float %f) {
%C = insertelement %T %A, float %f, i32 0
%D = shufflevector %T %C, %T %B, <4 x i32> <i32 5, i32 0, i32 2, i32 7>
%E = extractelement %T %D, i32 1
ret float %E
}
-define i32 %test4(<4 x i32> %X) {
+define i32 @test4(<4 x i32> %X) {
%tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> zeroinitializer
%tmp34 = extractelement <4 x i32> %tmp152.i53899.i, i32 0
ret i32 %tmp34
}
-define i32 %test5(<4 x i32> %X) {
+define i32 @test5(<4 x i32> %X) {
%tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 undef, i32 undef>
%tmp34 = extractelement <4 x i32> %tmp152.i53899.i, i32 0
ret i32 %tmp34
}
-define float %test6(<4 x float> %X) {
+define float @test6(<4 x float> %X) {
%X = bitcast <4 x float> %X to <4 x i32>
%tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> zeroinitializer
%tmp152.i53900.i = bitcast <4 x i32> %tmp152.i53899.i to <4 x float>
@@ -42,7 +42,7 @@
ret float %tmp34
}
-define <4 x float> %test7(<4 x float> %tmp45.i) {
+define <4 x float> @test7(<4 x float> %tmp45.i) {
%tmp1642.i = shufflevector <4 x float> %tmp45.i, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >
ret <4 x float> %tmp1642.i
}
Index: llvm/test/Transforms/InstCombine/xor.ll
diff -u llvm/test/Transforms/InstCombine/xor.ll:1.19 llvm/test/Transforms/InstCombine/xor.ll:1.20
--- llvm/test/Transforms/InstCombine/xor.ll:1.19 Thu Jan 4 21:03:51 2007
+++ llvm/test/Transforms/InstCombine/xor.ll Fri Jan 26 02:25:06 2007
@@ -182,11 +182,11 @@
}
-i32 %test27(i32 %b, i32 %c, i32 %d) {
- %tmp2 = xor i32 %d, %b
- %tmp5 = xor i32 %d, %c
- %tmp = icmp eq i32 %tmp2, %tmp5
- %tmp6 = zext bool %tmp to i32
- ret i32 %tmp6
+int %test27(int %b, int %c, int %d) {
+ %tmp2 = xor int %d, %b
+ %tmp5 = xor int %d, %c
+ %tmp = icmp eq int %tmp2, %tmp5
+ %tmp6 = zext bool %tmp to int
+ ret int %tmp6
}
Index: llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll
diff -u llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll:1.4 llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll:1.5
--- llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll:1.4 Mon Jan 1 23:55:05 2007
+++ llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll Fri Jan 26 02:25:06 2007
@@ -1,5 +1,5 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
-; RUN: grep 'and i32 %Y.s, 8'
+; RUN: grep 'and i32 %Y, 8'
int %test1(ubyte %X) {
%Y = cast ubyte %X to int
More information about the llvm-commits
mailing list