[llvm] r241989 - [X86][SSE] Vectorized v4i32 non-uniform shifts.
Simon Pilgrim
llvm-dev at redking.me.uk
Sun Jul 12 04:15:20 PDT 2015
Author: rksimon
Date: Sun Jul 12 06:15:19 2015
New Revision: 241989
URL: http://llvm.org/viewvc/llvm-project?rev=241989&view=rev
Log:
[X86][SSE] Vectorized v4i32 non-uniform shifts.
While the v4i32 shl operation is already vectorized using a cvttps2dq/pmulld pattern, the lshr/ashr operations are still scalarized.
This patch adds vectorization support for non-uniform v4i32 shift operations: constant shift amounts are splatted so the immediate SSE shift instructions can be used, while non-constant shift amounts are extracted and zero-extended for the variable SSE shifts. The individual per-lane results are then blended together.
Differential Revision: http://reviews.llvm.org/D11063
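For reference, the IR this patch targets is a plain vector shift whose per-lane amounts differ, as exercised by the var_shift_v4i32 tests below. A minimal sketch (the function name is illustrative) of such a shift, which previously scalarized into four shrl/sarl operations and now lowers to the per-lane psrld/psrad plus blend sequences shown in the updated CHECK lines:

  ; Non-uniform v4i32 shift: each lane of %a is shifted right by the
  ; corresponding lane of %b.
  define <4 x i32> @lshr_v4i32(<4 x i32> %a, <4 x i32> %b) {
    %r = lshr <4 x i32> %a, %b
    ret <4 x i32> %r
  }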
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll
llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll
llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll
llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
llvm/trunk/test/CodeGen/X86/widen_load-2.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Jul 12 06:15:19 2015
@@ -17417,6 +17417,53 @@ static SDValue LowerShift(SDValue Op, co
}
}
+ // v4i32 Non Uniform Shifts.
+ // If the shift amount is constant we can shift each lane using the SSE2
+ // immediate shifts, else we need to zero-extend each lane to the lower i64
+ // and shift using the SSE2 variable shifts.
+ // The separate results can then be blended together.
+ if (VT == MVT::v4i32) {
+ unsigned Opc = Op.getOpcode();
+ SDValue Amt0, Amt1, Amt2, Amt3;
+ if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
+ Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
+ Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
+ Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
+ } else {
+ // ISD::SHL is handled above but we include it here for completeness.
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unknown target vector shift node");
+ case ISD::SHL:
+ Opc = X86ISD::VSHL;
+ break;
+ case ISD::SRL:
+ Opc = X86ISD::VSRL;
+ break;
+ case ISD::SRA:
+ Opc = X86ISD::VSRA;
+ break;
+ }
+ // The SSE2 shifts use the lower i64 as the same shift amount for
+ // all lanes and the upper i64 is ignored. These shuffle masks
+    // optimally zero-extend each lane on SSE2/SSE41/AVX targets.
+ SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
+ Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
+ Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
+ Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
+ Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
+ }
+
+ SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
+ SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
+ SDValue R2 = DAG.getNode(Opc, dl, VT, R, Amt2);
+ SDValue R3 = DAG.getNode(Opc, dl, VT, R, Amt3);
+ SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
+ SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
+ return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
+ }
+
if (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget->hasInt256())) {
MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
unsigned ShiftOpcode = Op->getOpcode();
Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp Sun Jul 12 06:15:19 2015
@@ -261,18 +261,18 @@ unsigned X86TTIImpl::getArithmeticInstrC
{ ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
{ ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized.
{ ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized.
-
- { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
- { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
- { ISD::SRL, MVT::v4i32, 4*10 }, // Scalarized.
- { ISD::SRL, MVT::v2i64, 2*10 }, // Scalarized.
-
- { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
- { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
- { ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized.
- { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized.
-
- // It is not a good idea to vectorize division. We have to scalarize it and
+
+ { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
+ { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
+ { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
+ { ISD::SRL, MVT::v2i64, 2*10 }, // Scalarized.
+
+ { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
+ { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
+ { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend.
+ { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized.
+
+ // It is not a good idea to vectorize division. We have to scalarize it and
// in the process we will often end up having to spilling regular
// registers. The overhead of division is going to dominate most kernels
// anyways so try hard to prevent vectorization of division - it is
@@ -1117,17 +1117,17 @@ unsigned X86TTIImpl::getIntImmCost(Intri
}
return X86TTIImpl::getIntImmCost(Imm, Ty);
}
-
-bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
- int DataWidth = DataTy->getPrimitiveSizeInBits();
-
- // Todo: AVX512 allows gather/scatter, works with strided and random as well
- if ((DataWidth < 32) || (Consecutive == 0))
- return false;
- if (ST->hasAVX512() || ST->hasAVX2())
- return true;
- return false;
-}
+
+bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
+ int DataWidth = DataTy->getPrimitiveSizeInBits();
+
+ // Todo: AVX512 allows gather/scatter, works with strided and random as well
+ if ((DataWidth < 32) || (Consecutive == 0))
+ return false;
+ if (ST->hasAVX512() || ST->hasAVX2())
+ return true;
+ return false;
+}
bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
return isLegalMaskedLoad(DataType, Consecutive);
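The wider-type costs in the updated tests below follow from the new v4i32 entries: type legalization splits the wider shifts into <4 x i32> halves, so on SSE2 an <8 x i32> lshr/ashr now costs 2 x 16 = 32 instead of 2 x 40 = 80 (and likewise 64 for <16 x i32> and 128 for <32 x i32>). A minimal sketch, with an illustrative function name:

  ; Legalized as two <4 x i32> shifts, each now costed at 16 on SSE2.
  define <8 x i32> @lshr_v8i32(<8 x i32> %a, <8 x i32> %b) {
  entry:
    %r = lshr <8 x i32> %a, %b
    ret <8 x i32> %r
  }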
Modified: llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/testshiftashr.ll Sun Jul 12 06:15:19 2015
@@ -17,9 +17,9 @@ entry:
define %shifttype4i16 @shift4i16(%shifttype4i16 %a, %shifttype4i16 %b) {
entry:
; SSE2: shift4i16
- ; SSE2: cost of 40 {{.*}} ashr
+ ; SSE2: cost of 16 {{.*}} ashr
; SSE2-CODEGEN: shift4i16
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype4i16 %a , %b
ret %shifttype4i16 %0
@@ -77,9 +77,9 @@ entry:
define %shifttype4i32 @shift4i32(%shifttype4i32 %a, %shifttype4i32 %b) {
entry:
; SSE2: shift4i32
- ; SSE2: cost of 40 {{.*}} ashr
+ ; SSE2: cost of 16 {{.*}} ashr
; SSE2-CODEGEN: shift4i32
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype4i32 %a , %b
ret %shifttype4i32 %0
@@ -89,9 +89,9 @@ entry:
define %shifttype8i32 @shift8i32(%shifttype8i32 %a, %shifttype8i32 %b) {
entry:
; SSE2: shift8i32
- ; SSE2: cost of 80 {{.*}} ashr
+ ; SSE2: cost of 32 {{.*}} ashr
; SSE2-CODEGEN: shift8i32
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype8i32 %a , %b
ret %shifttype8i32 %0
@@ -101,9 +101,9 @@ entry:
define %shifttype16i32 @shift16i32(%shifttype16i32 %a, %shifttype16i32 %b) {
entry:
; SSE2: shift16i32
- ; SSE2: cost of 160 {{.*}} ashr
+ ; SSE2: cost of 64 {{.*}} ashr
; SSE2-CODEGEN: shift16i32
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype16i32 %a , %b
ret %shifttype16i32 %0
@@ -113,9 +113,9 @@ entry:
define %shifttype32i32 @shift32i32(%shifttype32i32 %a, %shifttype32i32 %b) {
entry:
; SSE2: shift32i32
- ; SSE2: cost of 320 {{.*}} ashr
+ ; SSE2: cost of 128 {{.*}} ashr
; SSE2-CODEGEN: shift32i32
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype32i32 %a , %b
ret %shifttype32i32 %0
@@ -197,9 +197,9 @@ entry:
define %shifttype4i8 @shift4i8(%shifttype4i8 %a, %shifttype4i8 %b) {
entry:
; SSE2: shift4i8
- ; SSE2: cost of 40 {{.*}} ashr
+ ; SSE2: cost of 16 {{.*}} ashr
; SSE2-CODEGEN: shift4i8
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype4i8 %a , %b
ret %shifttype4i8 %0
Modified: llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/testshiftlshr.ll Sun Jul 12 06:15:19 2015
@@ -17,9 +17,9 @@ entry:
define %shifttype4i16 @shift4i16(%shifttype4i16 %a, %shifttype4i16 %b) {
entry:
; SSE2: shift4i16
- ; SSE2: cost of 40 {{.*}} lshr
+ ; SSE2: cost of 16 {{.*}} lshr
; SSE2-CODEGEN: shift4i16
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype4i16 %a , %b
ret %shifttype4i16 %0
@@ -77,9 +77,9 @@ entry:
define %shifttype4i32 @shift4i32(%shifttype4i32 %a, %shifttype4i32 %b) {
entry:
; SSE2: shift4i32
- ; SSE2: cost of 40 {{.*}} lshr
+ ; SSE2: cost of 16 {{.*}} lshr
; SSE2-CODEGEN: shift4i32
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype4i32 %a , %b
ret %shifttype4i32 %0
@@ -89,9 +89,9 @@ entry:
define %shifttype8i32 @shift8i32(%shifttype8i32 %a, %shifttype8i32 %b) {
entry:
; SSE2: shift8i32
- ; SSE2: cost of 80 {{.*}} lshr
+ ; SSE2: cost of 32 {{.*}} lshr
; SSE2-CODEGEN: shift8i32
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype8i32 %a , %b
ret %shifttype8i32 %0
@@ -101,9 +101,9 @@ entry:
define %shifttype16i32 @shift16i32(%shifttype16i32 %a, %shifttype16i32 %b) {
entry:
; SSE2: shift16i32
- ; SSE2: cost of 160 {{.*}} lshr
+ ; SSE2: cost of 64 {{.*}} lshr
; SSE2-CODEGEN: shift16i32
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype16i32 %a , %b
ret %shifttype16i32 %0
@@ -113,9 +113,9 @@ entry:
define %shifttype32i32 @shift32i32(%shifttype32i32 %a, %shifttype32i32 %b) {
entry:
; SSE2: shift32i32
- ; SSE2: cost of 320 {{.*}} lshr
+ ; SSE2: cost of 128 {{.*}} lshr
; SSE2-CODEGEN: shift32i32
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype32i32 %a , %b
ret %shifttype32i32 %0
@@ -197,9 +197,9 @@ entry:
define %shifttype4i8 @shift4i8(%shifttype4i8 %a, %shifttype4i8 %b) {
entry:
; SSE2: shift4i8
- ; SSE2: cost of 40 {{.*}} lshr
+ ; SSE2: cost of 16 {{.*}} lshr
; SSE2-CODEGEN: shift4i8
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype4i8 %a , %b
ret %shifttype4i8 %0
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll Sun Jul 12 06:15:19 2015
@@ -56,73 +56,63 @@ define <2 x i64> @var_shift_v2i64(<2 x i
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: var_shift_v4i32:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: sarl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
-; SSE2-NEXT: movd %xmm3, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
-; SSE2-NEXT: movd %xmm3, %ecx
-; SSE2-NEXT: sarl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: movd %xmm1, %ecx
-; SSE2-NEXT: sarl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: sarl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrad %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrlq $32, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad %xmm2, %xmm4
+; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: psrad %xmm4, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: psrad %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
; SSE41: # BB#0:
-; SSE41-NEXT: pextrd $1, %xmm0, %eax
-; SSE41-NEXT: pextrd $1, %xmm1, %ecx
-; SSE41-NEXT: sarl %cl, %eax
-; SSE41-NEXT: movd %xmm0, %edx
-; SSE41-NEXT: movd %xmm1, %ecx
-; SSE41-NEXT: sarl %cl, %edx
-; SSE41-NEXT: movd %edx, %xmm2
-; SSE41-NEXT: pinsrd $1, %eax, %xmm2
-; SSE41-NEXT: pextrd $2, %xmm0, %eax
-; SSE41-NEXT: pextrd $2, %xmm1, %ecx
-; SSE41-NEXT: sarl %cl, %eax
-; SSE41-NEXT: pinsrd $2, %eax, %xmm2
-; SSE41-NEXT: pextrd $3, %xmm0, %eax
-; SSE41-NEXT: pextrd $3, %xmm1, %ecx
-; SSE41-NEXT: sarl %cl, %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrlq $32, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrad %xmm2, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrad %xmm1, %xmm2
+; SSE41-NEXT: psrad %xmm3, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vmovd %xmm0, %edx
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: sarl %cl, %edx
-; AVX1-NEXT: vmovd %edx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
+; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i32:
@@ -687,58 +677,43 @@ define <2 x i64> @constant_shift_v2i64(<
define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) {
; SSE2-LABEL: constant_shift_v4i32:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: sarl $7, %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: sarl $5, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: sarl $4, %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: sarl $6, %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $7, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $5, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $6, %xmm2
+; SSE2-NEXT: psrad $4, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
; SSE41: # BB#0:
-; SSE41-NEXT: pextrd $1, %xmm0, %eax
-; SSE41-NEXT: sarl $5, %eax
-; SSE41-NEXT: movd %xmm0, %ecx
-; SSE41-NEXT: sarl $4, %ecx
-; SSE41-NEXT: movd %ecx, %xmm1
-; SSE41-NEXT: pinsrd $1, %eax, %xmm1
-; SSE41-NEXT: pextrd $2, %xmm0, %eax
-; SSE41-NEXT: sarl $6, %eax
-; SSE41-NEXT: pinsrd $2, %eax, %xmm1
-; SSE41-NEXT: pextrd $3, %xmm0, %eax
-; SSE41-NEXT: sarl $7, %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrad $7, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrad $5, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrad $6, %xmm1
+; SSE41-NEXT: psrad $4, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: sarl $5, %eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: sarl $4, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: sarl $6, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: sarl $7, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpsrad $6, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $4, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i32:
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll Sun Jul 12 06:15:19 2015
@@ -63,39 +63,30 @@ define <8 x i32> @var_shift_v8i32(<8 x i
; AVX1-LABEL: var_shift_v8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpextrd $1, %xmm2, %eax
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vmovd %xmm2, %edx
-; AVX1-NEXT: vmovd %xmm3, %ecx
-; AVX1-NEXT: sarl %cl, %edx
-; AVX1-NEXT: vmovd %edx, %xmm4
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm4, %xmm4
-; AVX1-NEXT: vpextrd $2, %xmm2, %eax
-; AVX1-NEXT: vpextrd $2, %xmm3, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm4, %xmm4
-; AVX1-NEXT: vpextrd $3, %xmm2, %eax
-; AVX1-NEXT: vpextrd $3, %xmm3, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm4, %xmm2
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vmovd %xmm0, %edx
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: sarl %cl, %edx
-; AVX1-NEXT: vmovd %edx, %xmm3
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: sarl %cl, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm3, %xmm0
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrad %xmm4, %xmm2, %xmm4
+; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
+; AVX1-NEXT: vpsrad %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX1-NEXT: vpsrad %xmm6, %xmm2, %xmm6
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: vpsrad %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
+; AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -489,32 +480,20 @@ define <4 x i64> @constant_shift_v4i64(<
define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) {
; AVX1-LABEL: constant_shift_v8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; AVX1-NEXT: sarl $9, %eax
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: sarl $8, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm1, %eax
-; AVX1-NEXT: sarl $8, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm1, %eax
-; AVX1-NEXT: sarl $7, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: sarl $5, %eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: sarl $4, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: sarl $6, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: sarl $7, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpsrad $6, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $7, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $9, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpsrad $8, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i32:
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll Sun Jul 12 06:15:19 2015
@@ -46,73 +46,63 @@ define <2 x i64> @var_shift_v2i64(<2 x i
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: var_shift_v4i32:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: shrl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
-; SSE2-NEXT: movd %xmm3, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
-; SSE2-NEXT: movd %xmm3, %ecx
-; SSE2-NEXT: shrl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: movd %xmm1, %ecx
-; SSE2-NEXT: shrl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: shrl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrlq $32, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrld %xmm2, %xmm4
+; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: psrld %xmm4, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: psrld %xmm1, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
; SSE41: # BB#0:
-; SSE41-NEXT: pextrd $1, %xmm0, %eax
-; SSE41-NEXT: pextrd $1, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: movd %xmm0, %edx
-; SSE41-NEXT: movd %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %edx
-; SSE41-NEXT: movd %edx, %xmm2
-; SSE41-NEXT: pinsrd $1, %eax, %xmm2
-; SSE41-NEXT: pextrd $2, %xmm0, %eax
-; SSE41-NEXT: pextrd $2, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: pinsrd $2, %eax, %xmm2
-; SSE41-NEXT: pextrd $3, %xmm0, %eax
-; SSE41-NEXT: pextrd $3, %xmm1, %ecx
-; SSE41-NEXT: shrl %cl, %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrlq $32, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrld %xmm2, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrld %xmm1, %xmm2
+; SSE41-NEXT: psrld %xmm3, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vmovd %xmm0, %edx
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: shrl %cl, %edx
-; AVX1-NEXT: vmovd %edx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
+; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i32:
@@ -509,59 +499,44 @@ define <2 x i64> @constant_shift_v2i64(<
define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) {
; SSE2-LABEL: constant_shift_v4i32:
; SSE2: # BB#0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: shrl $7, %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: shrl $5, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: shrl $4, %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: shrl $6, %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: retq
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $7, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $5, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $6, %xmm2
+; SSE2-NEXT: psrld $4, %xmm0
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
-; SSE41: # BB#0:
-; SSE41-NEXT: pextrd $1, %xmm0, %eax
-; SSE41-NEXT: shrl $5, %eax
-; SSE41-NEXT: movd %xmm0, %ecx
-; SSE41-NEXT: shrl $4, %ecx
-; SSE41-NEXT: movd %ecx, %xmm1
-; SSE41-NEXT: pinsrd $1, %eax, %xmm1
-; SSE41-NEXT: pextrd $2, %xmm0, %eax
-; SSE41-NEXT: shrl $6, %eax
-; SSE41-NEXT: pinsrd $2, %eax, %xmm1
-; SSE41-NEXT: pextrd $3, %xmm0, %eax
-; SSE41-NEXT: shrl $7, %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $7, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrld $5, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $6, %xmm1
+; SSE41-NEXT: psrld $4, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_shift_v4i32:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: shrl $5, %eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: shrl $6, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX1-NEXT: retq
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $7, %xmm0, %xmm1
+; AVX1-NEXT: vpsrld $5, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpsrld $6, %xmm0, %xmm2
+; AVX1-NEXT: vpsrld $4, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v4i32:
; AVX2: # BB#0:
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll Sun Jul 12 06:15:19 2015
@@ -33,39 +33,30 @@ define <8 x i32> @var_shift_v8i32(<8 x i
; AVX1-LABEL: var_shift_v8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpextrd $1, %xmm2, %eax
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vmovd %xmm2, %edx
-; AVX1-NEXT: vmovd %xmm3, %ecx
-; AVX1-NEXT: shrl %cl, %edx
-; AVX1-NEXT: vmovd %edx, %xmm4
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm4, %xmm4
-; AVX1-NEXT: vpextrd $2, %xmm2, %eax
-; AVX1-NEXT: vpextrd $2, %xmm3, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm4, %xmm4
-; AVX1-NEXT: vpextrd $3, %xmm2, %eax
-; AVX1-NEXT: vpextrd $3, %xmm3, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm4, %xmm2
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vmovd %xmm0, %edx
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: shrl %cl, %edx
-; AVX1-NEXT: vmovd %edx, %xmm3
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: shrl %cl, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm3, %xmm0
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld %xmm4, %xmm2, %xmm4
+; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
+; AVX1-NEXT: vpsrld %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX1-NEXT: vpsrld %xmm6, %xmm2, %xmm6
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: vpsrld %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
+; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -334,32 +325,20 @@ define <4 x i64> @constant_shift_v4i64(<
define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) {
; AVX1-LABEL: constant_shift_v8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; AVX1-NEXT: shrl $9, %eax
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm1, %eax
-; AVX1-NEXT: shrl $8, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm1, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: shrl $5, %eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: shrl $6, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrld $7, %xmm0, %xmm1
+; AVX1-NEXT: vpsrld $5, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpsrld $6, %xmm0, %xmm2
+; AVX1-NEXT: vpsrld $4, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $7, %xmm0, %xmm2
+; AVX1-NEXT: vpsrld $9, %xmm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpsrld $8, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_shift_v8i32:
Modified: llvm/trunk/test/CodeGen/X86/widen_load-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_load-2.ll?rev=241989&r1=241988&r2=241989&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_load-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_load-2.ll Sun Jul 12 06:15:19 2015
@@ -194,17 +194,9 @@ define void @rot(%i8vec3pack* nocapture
; CHECK-NEXT: movl (%[[PTR0]]), [[TMP1:%e[abcd]+x]]
; CHECK-NEXT: movl [[TMP1]], [[TMP2:.*]]
; CHECK-NEXT: pmovzxbd [[TMP2]], %[[X0:xmm[0-9]+]]
-; CHECK-NEXT: pextrd $1, %[[X0]], %e[[R0:[abcd]]]x
-; CHECK-NEXT: shrl %e[[R0]]x
-; CHECK-NEXT: movd %[[X0]], %e[[R1:[abcd]]]x
-; CHECK-NEXT: shrl %e[[R1]]x
-; CHECK-NEXT: movd %e[[R1]]x, %[[X1:xmm[0-9]+]]
-; CHECK-NEXT: pinsrd $1, %e[[R0]]x, %[[X1]]
-; CHECK-NEXT: pextrd $2, %[[X0]], %e[[R0:[abcd]]]x
-; CHECK-NEXT: shrl %e[[R0]]x
-; CHECK-NEXT: pinsrd $2, %e[[R0]]x, %[[X1]]
-; CHECK-NEXT: pextrd $3, %[[X0]], %e[[R0:[abcd]]]x
-; CHECK-NEXT: pinsrd $3, %e[[R0]]x, %[[X1]]
+; CHECK-NEXT: movdqa %[[X0]], %[[X1:xmm[0-9]+]]
+; CHECK-NEXT: psrld $1, %[[X1]]
+; CHECK-NEXT: pblendw $192, %[[X0]], %[[X1]]
; CHECK-NEXT: pextrb $8, %[[X1]], 2(%{{.*}})
; CHECK-NEXT: pshufb %[[SHUFFLE_MASK]], %[[X1]]
; CHECK-NEXT: pmovzxwq %[[X1]], %[[X3:xmm[0-9]+]]