[llvm-commits] CVS: llvm/test/CodeGen/PowerPC/and-elim.ll and_sext.ll rlwinm2.ll rotl.ll

Reid Spencer reid at x10sys.com
Thu Feb 1 18:17:36 PST 2007



Changes in directory llvm/test/CodeGen/PowerPC:

and-elim.ll updated: 1.6 -> 1.7
and_sext.ll updated: 1.7 -> 1.8
rlwinm2.ll updated: 1.2 -> 1.3
rotl.ll updated: 1.5 -> 1.6
---
Log message:

Changes to support making the shift instructions true BinaryOperators.
This is needed to support shifts of more than 255 bits on large integer
types.  It changes the LLVM assembly syntax so that the shl, ashr, and
lshr instructions look like binary operators:
   shl i32 %X, 1
instead of
   shl i32 %X, i8 1
This should also help a few passes perform additional optimizations.
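To illustrate the 255-bit limit mentioned above: under the new form the
shift amount has the same type as the value being shifted, so a wide
shift can be written directly (hypothetical example, assuming a 512-bit
integer type is available):
   shl i512 %X, 300
The old form's i8 shift-amount operand could not express an amount
larger than 255.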


---
Diffs of the changes:  (+50 -68)

 and-elim.ll |    4 +--
 and_sext.ll |    4 +--
 rlwinm2.ll  |   43 +++++++++++++++++---------------------
 rotl.ll     |   67 +++++++++++++++++++++++-------------------------------------
 4 files changed, 50 insertions(+), 68 deletions(-)


Index: llvm/test/CodeGen/PowerPC/and-elim.ll
diff -u llvm/test/CodeGen/PowerPC/and-elim.ll:1.6 llvm/test/CodeGen/PowerPC/and-elim.ll:1.7
--- llvm/test/CodeGen/PowerPC/and-elim.ll:1.6	Fri Jan 26 02:25:06 2007
+++ llvm/test/CodeGen/PowerPC/and-elim.ll	Thu Feb  1 20:16:22 2007
@@ -3,7 +3,7 @@
 
 define void @test(i8* %P) {
 	%W = load i8* %P
-	%X = shl i8 %W, i8 1
+	%X = shl i8 %W, 1
 	%Y = add i8 %X, 2
 	%Z = and i8 %Y, 254        ; dead and
 	store i8 %Z, i8* %P
@@ -12,7 +12,7 @@
 
 define i16 @test2(i16 zext %crc) zext { 
         ; No and's should be needed for the i16s here.
-        %tmp.1 = lshr i16 %crc, i8 1
+        %tmp.1 = lshr i16 %crc, 1
         %tmp.7 = xor i16 %tmp.1, 40961
         ret i16 %tmp.7
 }


Index: llvm/test/CodeGen/PowerPC/and_sext.ll
diff -u llvm/test/CodeGen/PowerPC/and_sext.ll:1.7 llvm/test/CodeGen/PowerPC/and_sext.ll:1.8
--- llvm/test/CodeGen/PowerPC/and_sext.ll:1.7	Tue Jan 30 10:16:01 2007
+++ llvm/test/CodeGen/PowerPC/and_sext.ll	Thu Feb  1 20:16:22 2007
@@ -14,7 +14,7 @@
         %tmp = sext i16 %X to i32
         %tmp1 = sext i16 %x to i32
         %tmp2 = add i32 %tmp, %tmp1
-        %tmp4 = ashr i32 %tmp2, i8 1
+        %tmp4 = ashr i32 %tmp2, 1
         %tmp5 = trunc i32 %tmp4 to i16
         %tmp45 = sext i16 %tmp5 to i32
         %retval = trunc i32 %tmp45 to i16
@@ -22,7 +22,7 @@
 }
 
 define i16 @test3(i32 zext %X) sext {
-        %tmp1 = lshr i32 %X, i8 16
+        %tmp1 = lshr i32 %X, 16
         %tmp2 = trunc i32 %tmp1 to i16
         ret i16 %tmp2
 }


Index: llvm/test/CodeGen/PowerPC/rlwinm2.ll
diff -u llvm/test/CodeGen/PowerPC/rlwinm2.ll:1.2 llvm/test/CodeGen/PowerPC/rlwinm2.ll:1.3
--- llvm/test/CodeGen/PowerPC/rlwinm2.ll:1.2	Fri Dec  1 22:23:08 2006
+++ llvm/test/CodeGen/PowerPC/rlwinm2.ll	Thu Feb  1 20:16:22 2007
@@ -1,30 +1,27 @@
 ; All of these ands and shifts should be folded into rlw[i]nm instructions
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep and && 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep srawi && 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep srwi && 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep slwi && 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | grep rlwnm | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | grep rlwinm | wc -l | grep 1
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep and && 
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep srawi && 
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep srwi && 
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep slwi && 
+; RUN: llvm-as < %s | llc -march=ppc32 | grep rlwnm | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=ppc32 | grep rlwinm | wc -l | grep 1
 
-
-implementation   ; Functions:
-
-uint %test1(uint %X, int %Y) {
+define i32 @test1(i32 %X, i32 %Y) {
 entry:
-	%tmp = cast int %Y to ubyte		; <ubyte> [#uses=2]
-	%tmp1 = shl uint %X, ubyte %tmp		; <uint> [#uses=1]
-	%tmp2 = sub ubyte 32, %tmp		; <ubyte> [#uses=1]
-	%tmp3 = shr uint %X, ubyte %tmp2		; <uint> [#uses=1]
-	%tmp4 = or uint %tmp1, %tmp3		; <uint> [#uses=1]
-	%tmp6 = and uint %tmp4, 127		; <uint> [#uses=1]
-	ret uint %tmp6
+	%tmp = trunc i32 %Y to i8		; <i8> [#uses=2]
+	%tmp1 = shl i32 %X, %Y		; <i32> [#uses=1]
+	%tmp2 = sub i32 32, %Y		; <i32> [#uses=1]
+	%tmp3 = lshr i32 %X, %tmp2		; <i32> [#uses=1]
+	%tmp4 = or i32 %tmp1, %tmp3		; <i32> [#uses=1]
+	%tmp6 = and i32 %tmp4, 127		; <i32> [#uses=1]
+	ret i32 %tmp6
 }
 
-uint %test2(uint %X) {
+define i32 @test2(i32 %X) {
 entry:
-	%tmp1 = shr uint %X, ubyte 27		; <uint> [#uses=1]
-	%tmp2 = shl uint %X, ubyte 5		; <uint> [#uses=1]
-	%tmp2.masked = and uint %tmp2, 96		; <uint> [#uses=1]
-	%tmp5 = or uint %tmp1, %tmp2.masked		; <uint> [#uses=1]
-	ret uint %tmp5
+	%tmp1 = lshr i32 %X, 27		; <i32> [#uses=1]
+	%tmp2 = shl i32 %X, 5		; <i32> [#uses=1]
+	%tmp2.masked = and i32 %tmp2, 96		; <i32> [#uses=1]
+	%tmp5 = or i32 %tmp1, %tmp2.masked		; <i32> [#uses=1]
+	ret i32 %tmp5
 }


Index: llvm/test/CodeGen/PowerPC/rotl.ll
diff -u llvm/test/CodeGen/PowerPC/rotl.ll:1.5 llvm/test/CodeGen/PowerPC/rotl.ll:1.6
--- llvm/test/CodeGen/PowerPC/rotl.ll:1.5	Fri Jan  5 12:33:43 2007
+++ llvm/test/CodeGen/PowerPC/rotl.ll	Thu Feb  1 20:16:22 2007
@@ -1,53 +1,38 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep or && 
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | \
-; RUN:    grep rlwnm  | wc -l | grep 2 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | \
-; RUN:    grep rlwinm | wc -l | grep 2
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep or && 
+; RUN: llvm-as < %s | llc -march=ppc32 | grep rlwnm  | wc -l | grep 2 &&
+; RUN: llvm-as < %s | llc -march=ppc32 | grep rlwinm | wc -l | grep 2
 
-implementation   ; Functions:
-
-int %rotlw(uint %x, int %sh) {
+define i32 @rotlw(i32 %x, i32 %sh) {
 entry:
-	%tmp.3 = cast int %sh to ubyte		; <ubyte> [#uses=1]
-	%x = cast uint %x to int		; <int> [#uses=1]
-	%tmp.7 = sub int 32, %sh		; <int> [#uses=1]
-	%tmp.9 = cast int %tmp.7 to ubyte		; <ubyte> [#uses=1]
-	%tmp.10 = shr uint %x, ubyte %tmp.9		; <uint> [#uses=1]
-	%tmp.4 = shl int %x, ubyte %tmp.3		; <int> [#uses=1]
-	%tmp.10 = cast uint %tmp.10 to int		; <int> [#uses=1]
-	%tmp.12 = or int %tmp.10, %tmp.4		; <int> [#uses=1]
-	ret int %tmp.12
+	%tmp.7 = sub i32 32, %sh		; <i32> [#uses=1]
+	%tmp.10 = lshr i32 %x, %tmp.7		; <i32> [#uses=2]
+	%tmp.4 = shl i32 %x, %sh 		; <i32> [#uses=1]
+	%tmp.12 = or i32 %tmp.10, %tmp.4		; <i32> [#uses=1]
+	ret i32 %tmp.12
 }
 
-int %rotrw(uint %x, int %sh) {
+define i32 @rotrw(i32 %x, i32 %sh) {
 entry:
-	%tmp.3 = cast int %sh to ubyte		; <ubyte> [#uses=1]
-	%tmp.4 = shr uint %x, ubyte %tmp.3		; <uint> [#uses=1]
-	%tmp.7 = sub int 32, %sh		; <int> [#uses=1]
-	%tmp.9 = cast int %tmp.7 to ubyte		; <ubyte> [#uses=1]
-	%x = cast uint %x to int		; <int> [#uses=1]
-	%tmp.4 = cast uint %tmp.4 to int		; <int> [#uses=1]
-	%tmp.10 = shl int %x, ubyte %tmp.9		; <int> [#uses=1]
-	%tmp.12 = or int %tmp.4, %tmp.10		; <int> [#uses=1]
-	ret int %tmp.12
+	%tmp.3 = trunc i32 %sh to i8		; <i8> [#uses=1]
+	%tmp.4 = lshr i32 %x, %sh		; <i32> [#uses=2]
+	%tmp.7 = sub i32 32, %sh		; <i32> [#uses=1]
+	%tmp.10 = shl i32 %x, %tmp.7    	; <i32> [#uses=1]
+	%tmp.12 = or i32 %tmp.4, %tmp.10		; <i32> [#uses=1]
+	ret i32 %tmp.12
 }
 
-int %rotlwi(uint %x) {
+define i32 @rotlwi(i32 %x) {
 entry:
-	%x = cast uint %x to int		; <int> [#uses=1]
-	%tmp.7 = shr uint %x, ubyte 27		; <uint> [#uses=1]
-	%tmp.3 = shl int %x, ubyte 5		; <int> [#uses=1]
-	%tmp.7 = cast uint %tmp.7 to int		; <int> [#uses=1]
-	%tmp.9 = or int %tmp.3, %tmp.7		; <int> [#uses=1]
-	ret int %tmp.9
+	%tmp.7 = lshr i32 %x, 27		; <i32> [#uses=2]
+	%tmp.3 = shl i32 %x, 5		; <i32> [#uses=1]
+	%tmp.9 = or i32 %tmp.3, %tmp.7		; <i32> [#uses=1]
+	ret i32 %tmp.9
 }
 
-int %rotrwi(uint %x) {
+define i32 @rotrwi(i32 %x) {
 entry:
-	%tmp.3 = shr uint %x, ubyte 5		; <uint> [#uses=1]
-	%x = cast uint %x to int		; <int> [#uses=1]
-	%tmp.3 = cast uint %tmp.3 to int		; <int> [#uses=1]
-	%tmp.7 = shl int %x, ubyte 27		; <int> [#uses=1]
-	%tmp.9 = or int %tmp.3, %tmp.7		; <int> [#uses=1]
-	ret int %tmp.9
+	%tmp.3 = lshr i32 %x, 5		; <i32> [#uses=2]
+	%tmp.7 = shl i32 %x, 27		; <i32> [#uses=1]
+	%tmp.9 = or i32 %tmp.3, %tmp.7		; <i32> [#uses=1]
+	ret i32 %tmp.9
 }





