[llvm-commits] CVS: llvm/test/Regression/CodeGen/X86/commute-two-addr.ll overlap-add.ll overlap-shift.ll
Chris Lattner
lattner at cs.uiuc.edu
Sat Jan 1 18:30:17 PST 2005
Changes in directory llvm/test/Regression/CodeGen/X86:
commute-two-addr.ll added (r1.1)
overlap-add.ll added (r1.1)
overlap-shift.ll added (r1.1)
---
Log message:
Add several testcases for new optimizations in the code generator.
---
Diffs of the changes: (+62 -0)
Index: llvm/test/Regression/CodeGen/X86/commute-two-addr.ll
diff -c /dev/null llvm/test/Regression/CodeGen/X86/commute-two-addr.ll:1.1
*** /dev/null Sat Jan 1 20:30:15 2005
--- llvm/test/Regression/CodeGen/X86/commute-two-addr.ll Sat Jan 1 20:30:04 2005
***************
*** 0 ****
--- 1,21 ----
+ ; The register allocator can commute two-address instructions to avoid
+ ; insertion of register-register copies.
+
+ ; Check that there are no register-register copies left.
+ ; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | not grep 'mov %E.X, %E.X'
+
+ %G = external global int
+
+ declare void %ext(int)
+
+ int %add_test(int %X, int %Y) {
+ %Z = add int %X, %Y ;; Last use of Y, but not of X.
+ store int %Z, int* %G
+ ret int %X
+ }
+
+ int %xor_test(int %X, int %Y) {
+ %Z = xor int %X, %Y ;; Last use of Y, but not of X.
+ store int %Z, int* %G
+ ret int %X
+ }
Index: llvm/test/Regression/CodeGen/X86/overlap-add.ll
diff -c /dev/null llvm/test/Regression/CodeGen/X86/overlap-add.ll:1.1
*** /dev/null Sat Jan 1 20:30:17 2005
--- llvm/test/Regression/CodeGen/X86/overlap-add.ll Sat Jan 1 20:30:04 2005
***************
*** 0 ****
--- 1,24 ----
+ ;; X's live range extends beyond the shift, so the register allocator
+ ;; cannot coalesce it with Y. Because of this, a copy needs to be
+ ;; emitted before the shift to save the register value before it is
+ ;; clobbered. However, this copy is not needed if the register
+ ;; allocator turns the shift into an LEA. This also occurs for ADD.
+
+ ; Check that the shift gets turned into an LEA.
+
+ ; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | not grep 'mov %E.X, %E.X'
+
+ %G = external global int
+
+ int %test1(int %X, int %Y) {
+ %Z = add int %X, %Y
+ volatile store int %Y, int* %G
+ volatile store int %Z, int* %G
+ ret int %X
+ }
+
+ int %test2(int %X) {
+ %Z = add int %X, 1 ;; inc
+ volatile store int %Z, int* %G
+ ret int %X
+ }
Index: llvm/test/Regression/CodeGen/X86/overlap-shift.ll
diff -c /dev/null llvm/test/Regression/CodeGen/X86/overlap-shift.ll:1.1
*** /dev/null Sat Jan 1 20:30:17 2005
--- llvm/test/Regression/CodeGen/X86/overlap-shift.ll Sat Jan 1 20:30:04 2005
***************
*** 0 ****
--- 1,17 ----
+ ;; X's live range extends beyond the shift, so the register allocator
+ ;; cannot coalesce it with Y. Because of this, a copy needs to be
+ ;; emitted before the shift to save the register value before it is
+ ;; clobbered. However, this copy is not needed if the register
+ ;; allocator turns the shift into an LEA. This also occurs for ADD.
+
+ ; Check that the shift gets turned into an LEA.
+
+ ; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | not grep 'mov %E.X, %E.X'
+
+ %G = external global int
+
+ int %test1(int %X) {
+ %Z = shl int %X, ubyte 2
+ volatile store int %Z, int* %G
+ ret int %X
+ }
More information about the llvm-commits
mailing list