[llvm] r314599 - [X86][SSE] Fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Sep 30 10:57:34 PDT 2017
Author: rksimon
Date: Sat Sep 30 10:57:34 2017
New Revision: 314599
URL: http://llvm.org/viewvc/llvm-project?rev=314599&view=rev
Log:
[X86][SSE] Fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
Remove the sign-extend-in-register-style pattern if the sign is already extended enough
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-trunc.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=314599&r1=314598&r2=314599&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Sep 30 10:57:34 2017
@@ -31845,6 +31845,15 @@ static SDValue combineVectorShiftImm(SDN
N0.getOpcode() == X86ISD::VSRAI)
return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, N0.getOperand(0), N1);
+ // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
+ if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSHLI &&
+ N1 == N0.getOperand(1)) {
+ SDValue N00 = N0.getOperand(0);
+ unsigned NumSignBits = DAG.ComputeNumSignBits(N00);
+ if (ShiftVal.ult(NumSignBits))
+ return N00;
+ }
+
// We can decode 'whole byte' logical bit shifts as shuffles.
if (LogicalShift && (ShiftVal.getZExtValue() % 8) == 0) {
SDValue Op(N, 0);
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc.ll?rev=314599&r1=314598&r2=314599&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc.ll Sat Sep 30 10:57:34 2017
@@ -389,11 +389,7 @@ entry:
define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; SSE2-LABEL: trunc8i32_8i16_ashr:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: retq
@@ -727,40 +723,24 @@ entry:
define void @trunc16i32_16i16_ashr(<16 x i32> %a) {
; SSE2-LABEL: trunc16i32_16i16_ashr:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm3, %xmm2
; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
-; SSE2-NEXT: pslld $16, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: pslld $16, %xmm2
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm3, %xmm2
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i32_16i16_ashr:
; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: psrad $16, %xmm2
; SSSE3-NEXT: psrad $16, %xmm3
-; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: pslld $16, %xmm1
+; SSSE3-NEXT: psrad $16, %xmm2
+; SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: pslld $16, %xmm0
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: packssdw %xmm1, %xmm0
-; SSSE3-NEXT: pslld $16, %xmm3
-; SSSE3-NEXT: psrad $16, %xmm3
-; SSSE3-NEXT: pslld $16, %xmm2
-; SSSE3-NEXT: psrad $16, %xmm2
-; SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSSE3-NEXT: movdqu %xmm2, (%rax)
; SSSE3-NEXT: movdqu %xmm0, (%rax)
; SSSE3-NEXT: retq
More information about the llvm-commits
mailing list