[llvm-commits] [llvm] r127031 - in /llvm/trunk: lib/Analysis/ScalarEvolutionExpander.cpp test/CodeGen/X86/lsr-redundant-addressing.ll

Dan Gohman <gohman@apple.com>
Fri Mar 4 12:46:46 PST 2011


Author: djg
Date: Fri Mar  4 14:46:46 2011
New Revision: 127031

URL: http://llvm.org/viewvc/llvm-project?rev=127031&view=rev
Log:
When declining to reuse existing expressions that involve casts, ignore
bitcasts, which are really no-ops here. This fixes slowdowns on
MultiSource/Applications/aha and others.
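
For illustration (hypothetical IR, not from the patch or from aha): a
pointer induction variable whose increment is threaded through bitcasts.
A bitcast merely relabels the type without changing any bits, so the
expander can still recognize the increment behind it, whereas a
value-changing cast such as sext or trunc still blocks reuse:

  define void @copy(i8* %base, i64 %n) nounwind {
  entry:
    br label %loop

  loop:
    %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
    %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
    ; The chain behind %p.next is bitcast -> gep -> bitcast -> phi.
    ; Previously the walk stopped at the first CastInst and declined
    ; reuse; now it looks through the no-op bitcasts.
    %p32 = bitcast i8* %p to i32*
    store i32 0, i32* %p32
    %q = getelementptr i32* %p32, i64 1
    %p.next = bitcast i32* %q to i8*
    %i.next = add i64 %i, 1
    %done = icmp eq i64 %i.next, %n
    br i1 %done, label %exit, label %loop

  exit:
    ret void
  }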

Added:
    llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll
Modified:
    llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp

Modified: llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp?rev=127031&r1=127030&r2=127031&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp (original)
+++ llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp Fri Mar  4 14:46:46 2011
@@ -859,7 +859,7 @@
         // to be.
         do {
           if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
-              isa<CastInst>(IncV)) {
+              (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV))) {
             IncV = 0;
             break;
           }
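
Note which casts still terminate the walk: the other CastInst flavors
(sext, zext, trunc, fpext, ptrtoint, inttoptr, and so on) all compute a
new value, so the walk conservatively stops at them. Only a bitcast,
e.g.

  %p32 = bitcast i8* %p to i32*

relabels the type without changing any bits and lowers to no machine
code, which is why it is safe to look through here.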

Added: llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll?rev=127031&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll (added)
+++ llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll Fri Mar  4 14:46:46 2011
@@ -0,0 +1,45 @@
+; RUN: llc -march=x86-64 < %s | fgrep {addq	$-16,} | count 1
+; rdar://9081094
+
+; LSR shouldn't create lots of redundant address computations.
+
+%0 = type { i32, [3 x i32] }
+%1 = type { i32 (i32, i32, i32)*, i32, i32, [3 x i32], i8*, i8*, i8* }
+
+@pgm = external hidden unnamed_addr global [5 x %0], align 32
+@isa = external hidden unnamed_addr constant [13 x %1], align 32
+
+define void @main_bb.i() nounwind {
+bb:
+  br label %bb38
+
+bb38:                                             ; preds = %bb200, %bb
+  %tmp39 = phi i64 [ %tmp201, %bb200 ], [ 0, %bb ]
+  %tmp40 = sub i64 0, %tmp39
+  %tmp47 = getelementptr [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 0
+  %tmp34 = load i32* %tmp47, align 16
+  %tmp203 = icmp slt i32 %tmp34, 12
+  br i1 %tmp203, label %bb215, label %bb200
+
+bb200:                                            ; preds = %bb38
+  %tmp201 = add i64 %tmp39, 1
+  br label %bb38
+
+bb215:                                            ; preds = %bb38
+  %tmp50 = getelementptr [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 2
+  %tmp49 = getelementptr [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 1
+  %tmp48 = getelementptr [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 0
+  %tmp216 = add nsw i32 %tmp34, 1
+  store i32 %tmp216, i32* %tmp47, align 16
+  %tmp217 = sext i32 %tmp216 to i64
+  %tmp218 = getelementptr inbounds [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 0
+  %tmp219 = load i32* %tmp218, align 8
+  store i32 %tmp219, i32* %tmp48, align 4
+  %tmp220 = getelementptr inbounds [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 1
+  %tmp221 = load i32* %tmp220, align 4
+  store i32 %tmp221, i32* %tmp49, align 4
+  %tmp222 = getelementptr inbounds [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 2
+  %tmp223 = load i32* %tmp222, align 8
+  store i32 %tmp223, i32* %tmp50, align 4
+  ret void
+}
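
A note on the RUN line above (explanatory, not part of the test):
%0 = type { i32, [3 x i32] } occupies 4 + 3*4 = 16 bytes, and the loop
walks @pgm with the negated counter %tmp40 = sub i64 0, %tmp39, so good
LSR output keeps a single address register stepping by -16 per
iteration, with %tmp47/%tmp48/%tmp49/%tmp50 all folded in as fixed
offsets from it. Hence the test expects exactly one "addq $-16," in the
assembly; more than one would mean redundant address computations
survived.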
