[llvm-commits] [llvm] r115294 - in /llvm/trunk: lib/CodeGen/SelectionDAG/DAGCombiner.cpp test/CodeGen/X86/shift-folding.ll test/CodeGen/X86/store-narrow.ll
Chris Lattner
sabre at nondot.org
Thu Sep 30 22:36:09 PDT 2010
Author: lattner
Date: Fri Oct 1 00:36:09 2010
New Revision: 115294
URL: http://llvm.org/viewvc/llvm-project?rev=115294&view=rev
Log:
fix rdar://8494845 + PR8244 - a miscompile exposed by my patch in r101350
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/test/CodeGen/X86/shift-folding.ll
llvm/trunk/test/CodeGen/X86/store-narrow.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=115294&r1=115293&r2=115294&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri Oct 1 00:36:09 2010
@@ -4087,6 +4087,15 @@
if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
return SDValue();
}
+
+ // If the shift amount is larger than the input type then we're not
+ // accessing any of the loaded bytes. If the load was a zextload/extload
+ // then the result of the shift+trunc is zero/undef (handled elsewhere).
+ // If the load was a sextload then the result is a splat of the sign bit
+ // of the extended byte. This is not worth optimizing for.
+ if (ShAmt >= VT.getSizeInBits())
+ return SDValue();
+
}
}
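
For reference, a small standalone C++ sketch (not part of the patch; the function name and harness are illustrative only) of the arithmetic the new comment describes: when the shift amount covers every bit of a sign-extended narrow load, the result is a splat of the loaded byte's sign bit rather than zero, so folding the shift into a narrowed load is not a safe rewrite.

#include <cstdint>
#include <cstdio>

// Mirrors the IR in the new test10 below: sextload i8, lshr i32 by 8, trunc to i8.
int8_t shift_of_sextload(const int8_t *P) {
  int32_t Conv = *P;                    // sign-extending i8 load
  uint32_t Shr = (uint32_t)Conv >> 8;   // lshr i32 %conv, 8
  return (int8_t)Shr;                   // trunc i32 %shr3 to i8
}

int main() {
  int8_t Neg = -1, Pos = 1;
  // Prints "-1 0": the result tracks the sign bit of the loaded byte,
  // so it cannot be rewritten as a narrower load at some other offset.
  std::printf("%d %d\n", shift_of_sextload(&Neg), shift_of_sextload(&Pos));
  return 0;
}

A zero-extended or any-extended load really would produce zero (or undef) in this situation, which is why the comment only singles out the sextload case as the one the combine must now avoid.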
Modified: llvm/trunk/test/CodeGen/X86/shift-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-folding.ll?rev=115294&r1=115293&r2=115294&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-folding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-folding.ll Fri Oct 1 00:36:09 2010
@@ -1,21 +1,21 @@
; RUN: llc < %s -march=x86 | \
; RUN: grep {s\[ah\]\[rl\]l} | count 1
-define i32* @test1(i32* %P, i32 %X) {
+define i32* @test1(i32* %P, i32 %X) nounwind {
%Y = lshr i32 %X, 2 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %Y to i64 ; <i64> [#uses=1]
%P2 = getelementptr i32* %P, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
ret i32* %P2
}
-define i32* @test2(i32* %P, i32 %X) {
+define i32* @test2(i32* %P, i32 %X) nounwind {
%Y = shl i32 %X, 2 ; <i32> [#uses=1]
%gep.upgrd.2 = zext i32 %Y to i64 ; <i64> [#uses=1]
%P2 = getelementptr i32* %P, i64 %gep.upgrd.2 ; <i32*> [#uses=1]
ret i32* %P2
}
-define i32* @test3(i32* %P, i32 %X) {
+define i32* @test3(i32* %P, i32 %X) nounwind {
%Y = ashr i32 %X, 2 ; <i32> [#uses=1]
%P2 = getelementptr i32* %P, i32 %Y ; <i32*> [#uses=1]
ret i32* %P2
Modified: llvm/trunk/test/CodeGen/X86/store-narrow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/store-narrow.ll?rev=115294&r1=115293&r2=115294&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/store-narrow.ll (original)
+++ llvm/trunk/test/CodeGen/X86/store-narrow.ll Fri Oct 1 00:36:09 2010
@@ -152,3 +152,17 @@
store i32 %or, i32* @g_16
ret void
}
+
+; rdar://8494845 + PR8244
+; X64: test10:
+; X64-NEXT: movsbl (%rdi), %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: ret
+define i8 @test10(i8* %P) nounwind ssp {
+entry:
+ %tmp = load i8* %P, align 1
+ %conv = sext i8 %tmp to i32
+ %shr3 = lshr i32 %conv, 8
+ %conv2 = trunc i32 %shr3 to i8
+ ret i8 %conv2
+}
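
For readers checking the expected output by hand: movsbl (%rdi), %eax is the sign-extending byte load (%conv), shrl $8, %eax is the lshr, and the low byte of %eax that is returned is 0x00 or 0xFF depending on the sign bit of the loaded byte. In other words, the load-narrowing combine no longer fires on this pattern and the shift is preserved.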