[llvm] r334023 - [X86][SSE] Use multiplication scale factors for v8i16 SHL on pre-AVX2 targets.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 5 08:17:39 PDT 2018
Author: rksimon
Date: Tue Jun 5 08:17:39 2018
New Revision: 334023
URL: http://llvm.org/viewvc/llvm-project?rev=334023&view=rev
Log:
[X86][SSE] Use multiplication scale factors for v8i16 SHL on pre-AVX2 targets.
Similar to v4i32 SHL, convert v8i16 shift amounts to multiplication scale factors to improve performance and reduce instruction count. We were already doing this for constant shifts; this adds variable shift support.
Reduces the serial nature of the codegen, which previously relied on chains of pblendvb or pand+pandn+por shift blends.
This is a step towards adding support for vXi16 vector rotates.
Differential Revision: https://reviews.llvm.org/D47546
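For context, the transform leans on the identity x << s == x * (1 << s) for each 16-bit lane, so eight variable shifts collapse into a single pmullw once the per-lane scale factors are materialized. A minimal scalar sketch of that identity (illustrative C++ only, not the actual lowering code):

#include <cstdint>

// Model of one lane: a variable left shift is a multiply by a
// power-of-two scale factor (modulo 2^16), which is what pmullw
// computes lane-wise across the whole vector.
uint16_t shl_as_mul(uint16_t x, unsigned s) {
  uint16_t scale = uint16_t(1u << s); // per-lane scale factor, s in [0,15]
  return uint16_t(x * scale);
}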
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll
llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll
llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=334023&r1=334022&r2=334023&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Jun 5 08:17:39 2018
@@ -23240,10 +23240,11 @@ static SDValue convertShiftLeftToScale(S
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Amt.getSimpleValueType();
- bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
+ if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
+ (Subtarget.hasInt256() && VT == MVT::v16i16)))
+ return SDValue();
- if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
- (Subtarget.hasInt256() && VT == MVT::v16i16))) {
+ if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
SmallVector<SDValue, 8> Elts;
MVT SVT = VT.getVectorElementType();
unsigned SVTBits = SVT.getSizeInBits();
@@ -23269,6 +23270,8 @@ static SDValue convertShiftLeftToScale(S
return DAG.getBuildVector(VT, dl, Elts);
}
+ // If the target doesn't support variable shifts, use either FP conversion
+ // or integer multiplication to avoid shifting each element individually.
if (VT == MVT::v4i32) {
Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
@@ -23277,6 +23280,21 @@ static SDValue convertShiftLeftToScale(S
return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
}
+ // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
+ if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
+ SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
+ SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
+ SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
+ Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
+ Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
+ if (Subtarget.hasSSE41())
+ return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
+
+ return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
+ DAG.getBitcast(VT, Hi),
+ {0, 2, 4, 6, 8, 10, 12, 14});
+ }
+
return SDValue();
}
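The scale factors themselves come from the float-exponent trick visible in the test diffs below as pslld $23 / paddd 1065353216 / cvttps2dq: 1065353216 is 0x3f800000, the bit pattern of 1.0f, so shifting the amount into the exponent field and adding that pattern produces the float value 2^amt, which the truncating convert turns back into an integer. A hedged scalar model of the sequence (names are illustrative, not LLVM APIs; valid for amounts where 2^amt is exactly representable):

#include <cstdint>
#include <cstring>

// Scalar model of building 2^amt per 32-bit lane via float exponent
// bits, mirroring pslld $23 / paddd 1065353216 / cvttps2dq.
uint32_t pow2_via_float_bits(uint32_t amt) {
  // 0x3f800000 is the bit pattern of 1.0f; placing amt in the
  // exponent field multiplies 1.0f by 2^amt.
  uint32_t bits = (amt << 23) + 0x3f800000u;
  float f;
  std::memcpy(&f, &bits, sizeof f); // reinterpret bits as float == 2^amt
  return uint32_t(f);               // truncating convert, like cvttps2dq
}

For v8i16 the amounts are zero-extended into two v4i32 halves, run through this sequence, and narrowed back with packusdw on SSE4.1 or a pshuflw/pshufhw/pshufd shuffle on SSE2, as the new code above shows.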
Modified: llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll?rev=334023&r1=334022&r2=334023&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/testshiftshl.ll Tue Jun 5 08:17:39 2018
@@ -31,7 +31,7 @@ entry:
; SSE2: shift8i16
; SSE2: cost of 32 {{.*}} shl
; SSE2-CODEGEN: shift8i16
- ; SSE2-CODEGEN: psllw
+ ; SSE2-CODEGEN: pmullw
%0 = shl %shifttype8i16 %a , %b
ret %shifttype8i16 %0
@@ -43,7 +43,7 @@ entry:
; SSE2: shift16i16
; SSE2: cost of 64 {{.*}} shl
; SSE2-CODEGEN: shift16i16
- ; SSE2-CODEGEN: psllw
+ ; SSE2-CODEGEN: pmullw
%0 = shl %shifttype16i16 %a , %b
ret %shifttype16i16 %0
@@ -55,7 +55,7 @@ entry:
; SSE2: shift32i16
; SSE2: cost of 128 {{.*}} shl
; SSE2-CODEGEN: shift32i16
- ; SSE2-CODEGEN: psllw
+ ; SSE2-CODEGEN: pmullw
%0 = shl %shifttype32i16 %a , %b
ret %shifttype32i16 %0
@@ -211,7 +211,7 @@ entry:
; SSE2: shift8i8
; SSE2: cost of 32 {{.*}} shl
; SSE2-CODEGEN: shift8i8
- ; SSE2-CODEGEN: psllw
+ ; SSE2-CODEGEN: pmullw
%0 = shl %shifttype8i8 %a , %b
ret %shifttype8i8 %0
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll?rev=334023&r1=334022&r2=334023&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll Tue Jun 5 08:17:39 2018
@@ -225,149 +225,121 @@ define <4 x i32> @var_rotate_v4i32(<4 x
define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: var_rotate_v8i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
-; SSE2-NEXT: psubw %xmm1, %xmm3
-; SSE2-NEXT: psllw $12, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psllw $8, %xmm4
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: psraw $15, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: psllw $4, %xmm2
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: psraw $15, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: psllw $2, %xmm2
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; SSE2-NEXT: psubw %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm2, %xmm4
-; SSE2-NEXT: psllw $1, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: psllw $12, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-NEXT: pslld $23, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; SSE2-NEXT: paddd %xmm5, %xmm4
+; SSE2-NEXT: cvttps2dq %xmm4, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: pslld $23, %xmm1
+; SSE2-NEXT: paddd %xmm5, %xmm1
+; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: psllw $12, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psraw $15, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psraw $15, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm3, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: psraw $15, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm0, %xmm4
+; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v8i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
-; SSE41-NEXT: psubw %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psllw $12, %xmm0
-; SSE41-NEXT: psllw $4, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: paddw %xmm1, %xmm4
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: psllw $8, %xmm6
-; SSE41-NEXT: movdqa %xmm3, %xmm5
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm6, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm1
-; SSE41-NEXT: psllw $4, %xmm1
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm1
-; SSE41-NEXT: psllw $2, %xmm1
-; SSE41-NEXT: paddw %xmm4, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm1
-; SSE41-NEXT: psllw $1, %xmm1
-; SSE41-NEXT: paddw %xmm4, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psllw $12, %xmm0
-; SSE41-NEXT: psllw $4, %xmm2
-; SSE41-NEXT: por %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddw %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16]
+; SSE41-NEXT: psubw %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE41-NEXT: pslld $23, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
+; SSE41-NEXT: paddd %xmm3, %xmm1
+; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE41-NEXT: pslld $23, %xmm4
+; SSE41-NEXT: paddd %xmm3, %xmm4
+; SSE41-NEXT: cvttps2dq %xmm4, %xmm3
+; SSE41-NEXT: packusdw %xmm1, %xmm3
+; SSE41-NEXT: pmullw %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psllw $12, %xmm1
+; SSE41-NEXT: psllw $4, %xmm0
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: psrlw $8, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrlw $4, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm4
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrlw $2, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: psrlw $2, %xmm4
; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrlw $1, %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: psrlw $1, %xmm4
; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: por %xmm5, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: por %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_rotate_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
-; AVX1-NEXT: vpsllw $8, %xmm0, %xmm4
-; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm4
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $1, %xmm1, %xmm4
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX1-NEXT: vpslld $23, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vcvttps2dq %xmm3, %xmm3
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
@@ -431,71 +403,59 @@ define <8 x i16> @var_rotate_v8i16(<8 x
;
; X32-SSE-LABEL: var_rotate_v8i16:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
-; X32-SSE-NEXT: psubw %xmm1, %xmm3
-; X32-SSE-NEXT: psllw $12, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: psllw $8, %xmm4
-; X32-SSE-NEXT: pand %xmm2, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: por %xmm4, %xmm2
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm4
-; X32-SSE-NEXT: psraw $15, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm2, %xmm5
-; X32-SSE-NEXT: psllw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm4, %xmm2
-; X32-SSE-NEXT: por %xmm5, %xmm2
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; X32-SSE-NEXT: psubw %xmm1, %xmm2
+; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: movdqa %xmm1, %xmm4
-; X32-SSE-NEXT: psraw $15, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm2, %xmm5
-; X32-SSE-NEXT: psllw $2, %xmm2
-; X32-SSE-NEXT: pand %xmm4, %xmm2
-; X32-SSE-NEXT: por %xmm5, %xmm2
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm4
-; X32-SSE-NEXT: pandn %xmm2, %xmm4
-; X32-SSE-NEXT: psllw $1, %xmm2
-; X32-SSE-NEXT: pand %xmm1, %xmm2
-; X32-SSE-NEXT: psllw $12, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
+; X32-SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; X32-SSE-NEXT: pslld $23, %xmm4
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; X32-SSE-NEXT: paddd %xmm5, %xmm4
+; X32-SSE-NEXT: cvttps2dq %xmm4, %xmm4
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; X32-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X32-SSE-NEXT: pslld $23, %xmm1
+; X32-SSE-NEXT: paddd %xmm5, %xmm1
+; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; X32-SSE-NEXT: pmullw %xmm0, %xmm1
+; X32-SSE-NEXT: psllw $12, %xmm2
+; X32-SSE-NEXT: movdqa %xmm2, %xmm3
+; X32-SSE-NEXT: psraw $15, %xmm3
+; X32-SSE-NEXT: movdqa %xmm3, %xmm4
+; X32-SSE-NEXT: pandn %xmm0, %xmm4
; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
+; X32-SSE-NEXT: pand %xmm3, %xmm0
+; X32-SSE-NEXT: por %xmm4, %xmm0
+; X32-SSE-NEXT: paddw %xmm2, %xmm2
+; X32-SSE-NEXT: movdqa %xmm2, %xmm3
+; X32-SSE-NEXT: psraw $15, %xmm3
+; X32-SSE-NEXT: movdqa %xmm3, %xmm4
+; X32-SSE-NEXT: pandn %xmm0, %xmm4
; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm3
+; X32-SSE-NEXT: pand %xmm3, %xmm0
+; X32-SSE-NEXT: por %xmm4, %xmm0
+; X32-SSE-NEXT: paddw %xmm2, %xmm2
+; X32-SSE-NEXT: movdqa %xmm2, %xmm3
; X32-SSE-NEXT: psraw $15, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm1
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
-; X32-SSE-NEXT: psrlw $1, %xmm0
+; X32-SSE-NEXT: movdqa %xmm3, %xmm4
+; X32-SSE-NEXT: pandn %xmm0, %xmm4
+; X32-SSE-NEXT: psrlw $2, %xmm0
; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
+; X32-SSE-NEXT: paddw %xmm2, %xmm2
+; X32-SSE-NEXT: psraw $15, %xmm2
+; X32-SSE-NEXT: movdqa %xmm2, %xmm3
+; X32-SSE-NEXT: pandn %xmm0, %xmm3
+; X32-SSE-NEXT: por %xmm1, %xmm3
+; X32-SSE-NEXT: psrlw $1, %xmm0
+; X32-SSE-NEXT: pand %xmm2, %xmm0
+; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
%b16 = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %b
%shl = shl <8 x i16> %a, %b
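The rotate tests benefit from the same path indirectly: a variable rotate is expanded as (a << b) | (a >> (16 - b)), so the left-shift half now goes through the pmullw scale-factor lowering while the right-shift half keeps the psraw/pandn/por (or pblendvb) blend chain, as the diff above shows. A scalar sketch of that decomposition (assuming b is already masked to [0,15]):

#include <cstdint>

// Per-lane model of var_rotate_v8i16: rotate-left as a shl/lshr pair.
// The vector tests compute 16 - b up front (the psubw into %b16 above).
uint16_t rot_left16(uint16_t a, unsigned b) {
  // uint16_t promotes to int, so a >> 16 is well defined (yields 0)
  // when b == 0.
  return uint16_t((a << b) | (a >> (16 - b)));
}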
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll?rev=334023&r1=334022&r2=334023&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll Tue Jun 5 08:17:39 2018
@@ -171,50 +171,44 @@ define <16 x i16> @var_rotate_v16i16(<16
; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpsubw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $12, %xmm4, %xmm5
-; AVX1-NEXT: vpsllw $4, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm5, %xmm4, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpsllw $8, %xmm4, %xmm7
-; AVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm4, %xmm5
-; AVX1-NEXT: vpsllw $4, %xmm5, %xmm7
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vpsllw $2, %xmm5, %xmm7
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vpsllw $1, %xmm5, %xmm7
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm6
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm6
-; AVX1-NEXT: vpsllw $8, %xmm0, %xmm7
-; AVX1-NEXT: vpblendvb %xmm1, %xmm7, %xmm0, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm7
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $2, %xmm1, %xmm7
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $1, %xmm1, %xmm7
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
-; AVX1-NEXT: vpsllw $12, %xmm3, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX1-NEXT: vpslld $23, %xmm6, %xmm6
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vcvttps2dq %xmm6, %xmm6
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX1-NEXT: vpslld $23, %xmm4, %xmm4
+; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vcvttps2dq %xmm4, %xmm4
+; AVX1-NEXT: vpackusdw %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpmullw %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; AVX1-NEXT: vpslld $23, %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT: vpsllw $12, %xmm3, %xmm4
; AVX1-NEXT: vpsllw $4, %xmm3, %xmm3
-; AVX1-NEXT: vpor %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm5
-; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm6
-; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm4, %xmm3
-; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm4
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm4
+; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm5
+; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm6, %xmm3
+; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm5
+; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm5
+; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm5
+; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $12, %xmm2, %xmm4
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm4, %xmm2, %xmm2
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll?rev=334023&r1=334022&r2=334023&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll Tue Jun 5 08:17:39 2018
@@ -157,85 +157,57 @@ define <4 x i32> @var_shift_v4i32(<4 x i
define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: var_shift_v8i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: psllw $12, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psllw $8, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psllw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psllw $2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: psllw $1, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: pslld $23, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: cvttps2dq %xmm3, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: pslld $23, %xmm1
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v8i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psllw $12, %xmm0
-; SSE41-NEXT: psllw $4, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddw %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psllw $8, %xmm4
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: psllw $4, %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: psllw $2, %xmm1
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: psllw $1, %xmm1
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE41-NEXT: pslld $23, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1065353216,1065353216,1065353216,1065353216]
+; SSE41-NEXT: paddd %xmm2, %xmm1
+; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE41-NEXT: pslld $23, %xmm3
+; SSE41-NEXT: paddd %xmm2, %xmm3
+; SSE41-NEXT: cvttps2dq %xmm3, %xmm2
+; SSE41-NEXT: packusdw %xmm1, %xmm2
+; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v8i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpsllw $8, %xmm0, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v8i16:
@@ -289,37 +261,25 @@ define <8 x i16> @var_shift_v8i16(<8 x i
;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psllw $12, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psllw $8, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psllw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psllw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: psllw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
+; X32-SSE-NEXT: pxor %xmm2, %xmm2
+; X32-SSE-NEXT: movdqa %xmm1, %xmm3
+; X32-SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; X32-SSE-NEXT: pslld $23, %xmm3
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; X32-SSE-NEXT: paddd %xmm4, %xmm3
+; X32-SSE-NEXT: cvttps2dq %xmm3, %xmm3
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; X32-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X32-SSE-NEXT: pslld $23, %xmm1
+; X32-SSE-NEXT: paddd %xmm4, %xmm1
+; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; X32-SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X32-SSE-NEXT: pmullw %xmm1, %xmm0
; X32-SSE-NEXT: retl
%shift = shl <8 x i16> %a, %b
ret <8 x i16> %shift
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll?rev=334023&r1=334022&r2=334023&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll Tue Jun 5 08:17:39 2018
@@ -158,35 +158,29 @@ define <16 x i16> @var_shift_v16i16(<16
; AVX1-LABEL: var_shift_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
-; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1-NEXT: vpslld $23, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vcvttps2dq %xmm4, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpsllw $8, %xmm4, %xmm5
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm2, %xmm4
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $1, %xmm2, %xmm4
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
-; AVX1-NEXT: vpsllw $8, %xmm0, %xmm4
-; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmullw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX1-NEXT: vpslld $23, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vcvttps2dq %xmm3, %xmm3
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -254,35 +248,29 @@ define <16 x i16> @var_shift_v16i16(<16
; X32-AVX1-LABEL: var_shift_v16i16:
; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; X32-AVX1-NEXT: vpsllw $12, %xmm2, %xmm3
-; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
+; X32-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; X32-AVX1-NEXT: vpslld $23, %xmm4, %xmm4
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; X32-AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; X32-AVX1-NEXT: vcvttps2dq %xmm4, %xmm4
+; X32-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; X32-AVX1-NEXT: vpslld $23, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
+; X32-AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
+; X32-AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; X32-AVX1-NEXT: vpsllw $8, %xmm4, %xmm5
-; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2
-; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm4
-; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
-; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsllw $1, %xmm2, %xmm4
-; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
-; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
-; X32-AVX1-NEXT: vpsllw $8, %xmm0, %xmm4
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm1
-; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsllw $1, %xmm0, %xmm1
-; X32-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpmullw %xmm2, %xmm4, %xmm2
+; X32-AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; X32-AVX1-NEXT: vpslld $23, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpaddd %xmm5, %xmm3, %xmm3
+; X32-AVX1-NEXT: vcvttps2dq %xmm3, %xmm3
+; X32-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; X32-AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpaddd %xmm5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; X32-AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;