[llvm-commits] [llvm] r101350 - in /llvm/trunk: lib/CodeGen/SelectionDAG/DAGCombiner.cpp test/CodeGen/X86/store-narrow.ll

Chris Lattner sabre at nondot.org
Wed Apr 14 22:40:59 PDT 2010


Author: lattner
Date: Thu Apr 15 00:40:59 2010
New Revision: 101350

URL: http://llvm.org/viewvc/llvm-project?rev=101350&view=rev
Log:
teach codegen to turn trunc(zextload) into load when possible.
This doesn't occur much at all; it only seems to be formed when the
trunc optimization kicks in due to phase ordering.  In that case it
saves a few bytes on x86-32.
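
For illustration, here is a minimal sketch of the kind of function that
hits this path (a hypothetical reduction modeled on test1 in the updated
test file below; the exact IR is an assumption, not the committed test).
On x86-32 the zeroext i8 argument is passed through a stack slot, so it
reaches the SelectionDAG as a zero-extending load; the store-narrowing
combine then truncates that value back to i8, forming trunc(zextload):

    define void @narrow_store(i32* nocapture %a0, i8 zeroext %a1) nounwind {
    entry:
      %A = load i32* %a0, align 4      ; load the full word
      %B = and i32 %A, -256            ; clear the low byte
      %C = zext i8 %a1 to i32
      %D = or i32 %C, %B               ; insert the new low byte
      store i32 %D, i32* %a0, align 4  ; narrowed to a byte store
      ret void
    }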

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/test/CodeGen/X86/store-narrow.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=101350&r1=101349&r2=101350&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Thu Apr 15 00:40:59 2010
@@ -3637,7 +3637,7 @@
   // Do not generate loads of non-round integer types since these can
   // be expensive (and would be wrong if the type is not byte sized).
   if (isa<LoadSDNode>(N0) && N0.hasOneUse() && ExtVT.isRound() &&
-      cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() > EVTBits &&
+      cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() >= EVTBits &&
       // Do not change the width of a volatile load.
       !cast<LoadSDNode>(N0)->isVolatile()) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
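
The one-character change relaxes the guard in the load-width-reduction
combine: with the strict '>', it only fired when the load's in-memory
type was wider than the extracted value (a true load narrowing). With
'>=', the equal-width case is handled as well; no narrowing is needed,
only dropping the extension. Schematically (hand-written DAG notation,
not actual dag-combine dump output):

    before:  (i8 (trunc (i32 (zextload [p], mem:i8))))   ; left alone under '>'
    after:   (i8 (load [p], mem:i8))                     ; folded under '>='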

Modified: llvm/trunk/test/CodeGen/X86/store-narrow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/store-narrow.ll?rev=101350&r1=101349&r2=101350&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/store-narrow.ll (original)
+++ llvm/trunk/test/CodeGen/X86/store-narrow.ll Thu Apr 15 00:40:59 2010
@@ -1,5 +1,6 @@
 ; rdar://7860110
-; RUN: llc < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s -check-prefix=X64
+; RUN: llc -march=x86 < %s | FileCheck %s -check-prefix=X32
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.2"
 
@@ -12,8 +13,12 @@
   store i32 %D, i32* %a0, align 4
   ret void
   
-; CHECK: test1:
-; CHECK: movb	%sil, (%rdi)
+; X64: test1:
+; X64: movb	%sil, (%rdi)
+
+; X32: test1:
+; X32: movb	8(%esp), %al
+; X32: movb	%al, (%{{.*}})
 }
 
 define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
@@ -25,8 +30,12 @@
   %D = or i32 %B, %CS
   store i32 %D, i32* %a0, align 4
   ret void
-; CHECK: test2:
-; CHECK: movb	%sil, 1(%rdi)
+; X64: test2:
+; X64: movb	%sil, 1(%rdi)
+
+; X32: test2:
+; X32: movb	8(%esp), %al
+; X32: movb	%al, 1(%{{.*}})
 }
 
 define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
@@ -37,8 +46,12 @@
   %D = or i32 %B, %C
   store i32 %D, i32* %a0, align 4
   ret void
-; CHECK: test3:
-; CHECK: movw	%si, (%rdi)
+; X64: test3:
+; X64: movw	%si, (%rdi)
+
+; X32: test3:
+; X32: movw	8(%esp), %ax
+; X32: movw	%ax, (%{{.*}})
 }
 
 define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
@@ -50,8 +63,12 @@
   %D = or i32 %B, %CS
   store i32 %D, i32* %a0, align 4
   ret void
-; CHECK: test4:
-; CHECK: movw	%si, 2(%rdi)
+; X64: test4:
+; X64: movw	%si, 2(%rdi)
+
+; X32: test4:
+; X32: movw	8(%esp), %ax
+; X32: movw	%ax, 2(%{{.*}})
 }
 
 define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
@@ -63,8 +80,12 @@
   %D = or i64 %B, %CS
   store i64 %D, i64* %a0, align 4
   ret void
-; CHECK: test5:
-; CHECK: movw	%si, 2(%rdi)
+; X64: test5:
+; X64: movw	%si, 2(%rdi)
+
+; X32: test5:
+; X32: movw	8(%esp), %ax
+; X32: movw	%ax, 2(%{{.*}})
 }
 
 define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
@@ -76,6 +97,11 @@
   %D = or i64 %B, %CS
   store i64 %D, i64* %a0, align 4
   ret void
-; CHECK: test6:
-; CHECK: movb	%sil, 5(%rdi)
+; X64: test6:
+; X64: movb	%sil, 5(%rdi)
+
+
+; X32: test6:
+; X32: movb	8(%esp), %al
+; X32: movb	%al, 5(%{{.*}})
 }

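With the two RUN lines, the same .ll file is now checked against both
targets: the default x86-64 output (from the module triple) is matched
with the X64 prefix, and the 'llc -march=x86' output with the X32
prefix. The x86-32 run is where the new fold shows up: the argument is
reloaded from its stack slot with a plain movb/movw instead of a
zero-extending load followed by a truncating store.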