[llvm-commits] CVS: llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll 2007-01-31-RegInfoAssert.ll bits.ll long_shift.ll sxt_rot.ll uxt_rot.ll
Reid Spencer
reid at x10sys.com
Thu Feb 1 18:17:36 PST 2007
Changes in directory llvm/test/CodeGen/ARM:
2007-01-19-InfiniteLoop.ll updated: 1.3 -> 1.4
2007-01-31-RegInfoAssert.ll updated: 1.1 -> 1.2
bits.ll updated: 1.2 -> 1.3
long_shift.ll updated: 1.2 -> 1.3
sxt_rot.ll updated: 1.2 -> 1.3
uxt_rot.ll updated: 1.2 -> 1.3
---
Log message:
Changes to support making the shift instructions be true BinaryOperators.
This feature is needed in order to support shifts of more than 255 bits
on large integer types. This changes the syntax for llvm assembly to
make the shl, ashr and lshr instructions look like binary operators:
shl i32 %X, 1
instead of
shl i32 %X, i8 1
This should also help a few passes perform further optimizations.
---
Diffs of the changes: (+42 -42)
2007-01-19-InfiniteLoop.ll | 12 ++++++------
2007-01-31-RegInfoAssert.ll | 2 +-
bits.ll | 42 +++++++++++++++++++++---------------------
long_shift.ll | 16 ++++++++--------
sxt_rot.ll | 8 ++++----
uxt_rot.ll | 4 ++--
6 files changed, 42 insertions(+), 42 deletions(-)
Index: llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
diff -u llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll:1.3 llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll:1.4
--- llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll:1.3 Tue Jan 30 10:16:01 2007
+++ llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll Thu Feb 1 20:16:22 2007
@@ -19,13 +19,13 @@
%tmp502 = load i32* null ; <i32> [#uses=1]
%tmp542 = getelementptr [6 x [4 x [4 x i32]]]* @quant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
%tmp543 = load i32* %tmp542 ; <i32> [#uses=1]
- %tmp548 = ashr i32 0, i8 0 ; <i32> [#uses=3]
+ %tmp548 = ashr i32 0, 0 ; <i32> [#uses=3]
%tmp561 = sub i32 0, %tmp496 ; <i32> [#uses=3]
%abscond563 = icmp sgt i32 %tmp561, -1 ; <i1> [#uses=1]
%abs564 = select i1 %abscond563, i32 %tmp561, i32 0 ; <i32> [#uses=1]
%tmp572 = mul i32 %abs564, %tmp543 ; <i32> [#uses=1]
%tmp574 = add i32 %tmp572, 0 ; <i32> [#uses=1]
- %tmp576 = ashr i32 %tmp574, i8 0 ; <i32> [#uses=7]
+ %tmp576 = ashr i32 %tmp574, 0 ; <i32> [#uses=7]
%tmp579 = icmp eq i32 %tmp548, %tmp576 ; <i1> [#uses=1]
br i1 %tmp579, label %bb712, label %cond_next589
@@ -40,8 +40,8 @@
%tmp642 = call fastcc i32 @sign( i32 %tmp576, i32 %tmp561 ) ; <i32> [#uses=1]
%tmp650 = mul i32 %tmp606, %tmp642 ; <i32> [#uses=1]
%tmp656 = mul i32 %tmp650, %tmp612 ; <i32> [#uses=1]
- %tmp658 = shl i32 %tmp656, i8 0 ; <i32> [#uses=1]
- %tmp659 = ashr i32 %tmp658, i8 6 ; <i32> [#uses=1]
+ %tmp658 = shl i32 %tmp656, 0 ; <i32> [#uses=1]
+ %tmp659 = ashr i32 %tmp658, 6 ; <i32> [#uses=1]
%tmp660 = sub i32 0, %tmp659 ; <i32> [#uses=1]
%tmp666 = sub i32 %tmp660, %tmp496 ; <i32> [#uses=1]
%tmp667 = sitofp i32 %tmp666 to double ; <double> [#uses=2]
@@ -85,8 +85,8 @@
%tmp786 = load i32* %tmp785 ; <i32> [#uses=1]
%tmp781 = mul i32 %tmp780, %tmp761 ; <i32> [#uses=1]
%tmp787 = mul i32 %tmp781, %tmp786 ; <i32> [#uses=1]
- %tmp789 = shl i32 %tmp787, i8 0 ; <i32> [#uses=1]
- %tmp790 = ashr i32 %tmp789, i8 6 ; <i32> [#uses=1]
+ %tmp789 = shl i32 %tmp787, 0 ; <i32> [#uses=1]
+ %tmp790 = ashr i32 %tmp789, 6 ; <i32> [#uses=1]
br label %cond_next791
cond_next791: ; preds = %cond_true740, %bb737
Index: llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll
diff -u llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll:1.1 llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll:1.2
--- llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll:1.1 Wed Jan 31 20:27:24 2007
+++ llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll Thu Feb 1 20:16:22 2007
@@ -7,7 +7,7 @@
%D = alloca %struct.rtx_def, align 1
%tmp1 = bitcast %struct.rtx_def* %D to i32*
%tmp7 = load i32* %tmp1
- %tmp14 = lshr i32 %tmp7, i8 1
+ %tmp14 = lshr i32 %tmp7, 1
%tmp1415 = and i32 %tmp14, 1
call void (i32, ...)* @printf( i32 undef, i32 0, i32 %tmp1415 )
ret void
Index: llvm/test/CodeGen/ARM/bits.ll
diff -u llvm/test/CodeGen/ARM/bits.ll:1.2 llvm/test/CodeGen/ARM/bits.ll:1.3
--- llvm/test/CodeGen/ARM/bits.ll:1.2 Fri Dec 1 22:23:08 2006
+++ llvm/test/CodeGen/ARM/bits.ll Thu Feb 1 20:16:22 2007
@@ -1,36 +1,36 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep and | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep orr | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep eor | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mov.*lsl | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mov.*asr | wc -l | grep 1
+; RUN: llvm-as < %s | llc -march=arm &&
+; RUN: llvm-as < %s | llc -march=arm | grep and | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep orr | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep eor | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep mov.*lsl | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep mov.*asr | wc -l | grep 1
-int %f1(int %a, int %b) {
+define i32 @f1(i32 %a, i32 %b) {
entry:
- %tmp2 = and int %b, %a ; <int> [#uses=1]
- ret int %tmp2
+ %tmp2 = and i32 %b, %a ; <i32> [#uses=1]
+ ret i32 %tmp2
}
-int %f2(int %a, int %b) {
+define i32 @f2(i32 %a, i32 %b) {
entry:
- %tmp2 = or int %b, %a ; <int> [#uses=1]
- ret int %tmp2
+ %tmp2 = or i32 %b, %a ; <i32> [#uses=1]
+ ret i32 %tmp2
}
-int %f3(int %a, int %b) {
+define i32 @f3(i32 %a, i32 %b) {
entry:
- %tmp2 = xor int %b, %a ; <int> [#uses=1]
- ret int %tmp2
+ %tmp2 = xor i32 %b, %a ; <i32> [#uses=1]
+ ret i32 %tmp2
}
-int %f4(int %a, ubyte %b) {
+define i32 @f4(i32 %a, i32 %b) {
entry:
- %tmp3 = shl int %a, ubyte %b ; <int> [#uses=1]
- ret int %tmp3
+ %tmp3 = shl i32 %a, %b ; <i32> [#uses=1]
+ ret i32 %tmp3
}
-int %f5(int %a, ubyte %b) {
+define i32 @f5(i32 %a, i32 %b) {
entry:
- %tmp3 = shr int %a, ubyte %b ; <int> [#uses=1]
- ret int %tmp3
+ %tmp3 = ashr i32 %a, %b ; <i32> [#uses=1]
+ ret i32 %tmp3
}
Index: llvm/test/CodeGen/ARM/long_shift.ll
diff -u llvm/test/CodeGen/ARM/long_shift.ll:1.2 llvm/test/CodeGen/ARM/long_shift.ll:1.3
--- llvm/test/CodeGen/ARM/long_shift.ll:1.2 Fri Jan 26 18:04:57 2007
+++ llvm/test/CodeGen/ARM/long_shift.ll Thu Feb 1 20:16:22 2007
@@ -5,27 +5,27 @@
; RUN: llvm-as < %s | llc -march=arm | grep __lshrdi3 &&
; RUN: llvm-as < %s | llc -march=arm -enable-thumb
-define i64 @f00(i64 %A, i64 %B) {
+define i64 @f0(i64 %A, i64 %B) {
%tmp = bitcast i64 %A to i64
- %tmp2 = lshr i64 %B, i8 1
+ %tmp2 = lshr i64 %B, 1
%tmp3 = sub i64 %tmp, %tmp2
ret i64 %tmp3
}
-define i32 @f1(i64 %x, i8 %y) {
- %a = shl i64 %x, i8 %y
+define i32 @f1(i64 %x, i64 %y) {
+ %a = shl i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
}
-define i32 @f2(i64 %x, i8 %y) {
- %a = ashr i64 %x, i8 %y
+define i32 @f2(i64 %x, i64 %y) {
+ %a = ashr i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
}
-define i32 @f3(i64 %x, i8 %y) {
- %a = lshr i64 %x, i8 %y
+define i32 @f3(i64 %x, i64 %y) {
+ %a = lshr i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
}
Index: llvm/test/CodeGen/ARM/sxt_rot.ll
diff -u llvm/test/CodeGen/ARM/sxt_rot.ll:1.2 llvm/test/CodeGen/ARM/sxt_rot.ll:1.3
--- llvm/test/CodeGen/ARM/sxt_rot.ll:1.2 Fri Jan 26 02:25:05 2007
+++ llvm/test/CodeGen/ARM/sxt_rot.ll Thu Feb 1 20:16:22 2007
@@ -4,16 +4,16 @@
; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | grep "sxtab" | wc -l | grep 1
define i8 @test1(i32 %A) sext {
- %B = lshr i32 %A, i8 8
- %C = shl i32 %A, i8 24
+ %B = lshr i32 %A, 8
+ %C = shl i32 %A, 24
%D = or i32 %B, %C
%E = trunc i32 %D to i8
ret i8 %E
}
define i32 @test2(i32 %A, i32 %X) sext {
- %B = lshr i32 %A, i8 8
- %C = shl i32 %A, i8 24
+ %B = lshr i32 %A, 8
+ %C = shl i32 %A, 24
%D = or i32 %B, %C
%E = trunc i32 %D to i8
%F = sext i8 %E to i32
Index: llvm/test/CodeGen/ARM/uxt_rot.ll
diff -u llvm/test/CodeGen/ARM/uxt_rot.ll:1.2 llvm/test/CodeGen/ARM/uxt_rot.ll:1.3
--- llvm/test/CodeGen/ARM/uxt_rot.ll:1.2 Fri Jan 26 02:25:05 2007
+++ llvm/test/CodeGen/ARM/uxt_rot.ll Thu Feb 1 20:16:22 2007
@@ -17,8 +17,8 @@
}
define i32 @test3(i32 %A.u) zext {
- %B.u = lshr i32 %A.u, i8 8
- %C.u = shl i32 %A.u, i8 24
+ %B.u = lshr i32 %A.u, 8
+ %C.u = shl i32 %A.u, 24
%D.u = or i32 %B.u, %C.u
%E.u = trunc i32 %D.u to i16
%F.u = zext i16 %E.u to i32
More information about the llvm-commits
mailing list