[llvm] r314076 - [X86][SSE] Add support for extending bool vectors bitcasted from scalars
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Sep 24 06:42:32 PDT 2017
Author: rksimon
Date: Sun Sep 24 06:42:31 2017
New Revision: 314076
URL: http://llvm.org/viewvc/llvm-project?rev=314076&view=rev
Log:
[X86][SSE] Add support for extending bool vectors bitcasted from scalars
This patch is more or less the reverse of combineBitcastvxi1 - bitcasting a scalar integer to a boolean vector and extending it 'in place' to the requested legal type.
This doesn't handle AVX512 at all yet - but the existing mask register approach is lacking for some cases.
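As a concrete model of the transform: after bitcasting an iN scalar to an N x i1 vector, lane i of the extended result is simply a test of bit i of the scalar. A minimal C++ sketch of the per-lane semantics (illustrative only - hypothetical helpers, not code from the patch):

  #include <cstdint>

  // Hypothetical model: lane i of sext(vNi1 bitcast(iN a0)) is all-ones
  // if bit i of a0 is set, otherwise zero.
  int32_t sext_lane(uint32_t a0, unsigned i) {
    return ((a0 >> i) & 1) ? -1 : 0;
  }

  // Lane i of zext(vNi1 bitcast(iN a0)) is just bit i itself.
  uint32_t zext_lane(uint32_t a0, unsigned i) {
    return (a0 >> i) & 1;
  }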
Differential Revision: https://reviews.llvm.org/D35320
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=314076&r1=314075&r2=314076&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Sep 24 06:42:31 2017
@@ -17274,6 +17274,24 @@ static SDValue LowerVSETCC(SDValue Op, c
DAG.getConstant(CmpMode, dl, MVT::i8));
}
+ // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
+ // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
+ if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
+ SDValue BC0 = peekThroughBitcasts(Op0);
+ if (BC0.getOpcode() == ISD::AND) {
+ APInt UndefElts;
+ SmallVector<APInt, 64> EltBits;
+ if (getTargetConstantBitsFromNode(BC0.getOperand(1),
+ VT.getScalarSizeInBits(), UndefElts,
+ EltBits, false, false)) {
+ if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
+ Cond = ISD::SETEQ;
+ Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
+ }
+ }
+ }
+ }
+
// We are handling one of the integer comparisons here. Since SSE only has
// GT and EQ comparisons for integer, swapping operands and multiple
// operations may be required for some comparisons.
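The rewrite above leans on a simple identity: when Y is a power of 2, X & Y can only evaluate to 0 or Y, so (X & Y) != 0 and (X & Y) == Y agree, and the latter maps directly onto PCMPEQ without a trailing invert. A small standalone C++ check of the identity (not part of the patch):

  #include <cassert>
  #include <cstdint>

  int main() {
    // For single-bit Y, (X & Y) is either 0 or Y, so the predicates match.
    for (uint32_t X = 0; X < 256; ++X)
      for (unsigned B = 0; B < 8; ++B) {
        uint32_t Y = 1u << B;
        assert(((X & Y) != 0) == ((X & Y) == Y));
      }
    return 0;
  }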
@@ -34480,6 +34498,95 @@ static SDValue combineToExtendCMOV(SDNod
CMovN.getOperand(2), CMovN.getOperand(3));
}
+// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
+// This is more or less the reverse of combineBitcastvxi1.
+static SDValue
+combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ unsigned Opcode = N->getOpcode();
+ if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
+ Opcode != ISD::ANY_EXTEND)
+ return SDValue();
+ if (!DCI.isBeforeLegalizeOps())
+ return SDValue();
+ if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
+ return SDValue();
+
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ EVT SVT = VT.getScalarType();
+ EVT InSVT = N0.getValueType().getScalarType();
+ unsigned EltSizeInBits = SVT.getSizeInBits();
+
+ // We must be extending a bool vector (bit-casted from a scalar integer)
+ // to legal integer vector types.
+ if (!VT.isVector())
+ return SDValue();
+ if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
+ return SDValue();
+ if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
+ return SDValue();
+
+ SDValue N00 = N0.getOperand(0);
+ EVT SclVT = N00.getValueType();
+ if (!SclVT.isScalarInteger())
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue Vec;
+ SmallVector<int, 32> ShuffleMask;
+ unsigned NumElts = VT.getVectorNumElements();
+ assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
+
+ // Broadcast the scalar integer to the vector elements.
+ if (NumElts > EltSizeInBits) {
+ // If the scalar integer is greater than the vector element size, then we
+ // must split it down into sub-sections for broadcasting. For example:
+ // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
+ // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
+ assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
+ unsigned Scale = NumElts / EltSizeInBits;
+ EVT BroadcastVT =
+ EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
+ Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
+ Vec = DAG.getBitcast(VT, Vec);
+
+ for (unsigned i = 0; i != Scale; ++i)
+ ShuffleMask.append(EltSizeInBits, i);
+ } else {
+ // If the scalar integer is no wider than the vector element size, we can
+ // simply any-extend it to the element type (the upper bits don't matter)
+ // and broadcast it to all elements.
+ SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
+ Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
+ ShuffleMask.append(NumElts, 0);
+ }
+ Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
+
+ // Now, mask the relevant bit in each element.
+ SmallVector<SDValue, 32> Bits;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int BitIdx = (i % EltSizeInBits);
+ APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
+ Bits.push_back(DAG.getConstant(Bit, DL, SVT));
+ }
+ SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
+ Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
+
+ // Compare against the bitmask and extend the result.
+ EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
+ Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
+ Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
+
+ // For SEXT we are now done; otherwise shift the result down for
+ // zero-extension.
+ if (Opcode == ISD::SIGN_EXTEND)
+ return Vec;
+ return DAG.getNode(ISD::SRL, DL, VT, Vec,
+ DAG.getConstant(EltSizeInBits - 1, DL, VT));
+}
+
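Put together, the DAG sequence built above behaves per lane like the following scalar C++ sketch: broadcast the scalar, AND with the lane's bit, compare against that bit to get 0/all-ones, then shift right by EltSizeInBits-1 for ZERO_EXTEND. The helper name is hypothetical; this models the emitted sequence rather than quoting it:

  #include <cstdint>

  // Hypothetical model of lane I when extending (v8i1 bitcast(i8 A0))
  // to v8i16 (so EltSizeInBits == 16).
  uint16_t LoweredLane(uint8_t A0, unsigned I, bool IsZExt) {
    uint16_t Bcast = A0;                                // any-extended broadcast
    uint16_t Bit = uint16_t(1) << I;                    // this lane's bitmask element
    uint16_t Cmp = ((Bcast & Bit) == Bit) ? 0xFFFF : 0; // SETEQ -> PCMPEQ
    return IsZExt ? uint16_t(Cmp >> 15) : Cmp;          // SRL only for zext
  }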
/// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
/// ZERO_EXTEND_VECTOR_INREG, this requires the splitting (or concatenating
/// with UNDEFs) of the input to vectors of the same size as the target type
@@ -34619,6 +34726,9 @@ static SDValue combineSext(SDNode *N, Se
if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
return V;
+ if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
+ return V;
+
if (Subtarget.hasAVX() && VT.is256BitVector())
if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
return R;
@@ -34755,6 +34865,9 @@ static SDValue combineZext(SDNode *N, Se
if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
return V;
+ if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
+ return V;
+
if (VT.is256BitVector())
if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
return R;
Modified: llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll?rev=314076&r1=314075&r2=314076&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll Sun Sep 24 06:42:31 2017
@@ -12,33 +12,35 @@
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: andb $3, %dil
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $62, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movq %rcx, %xmm1
-; SSE2-SSSE3-NEXT: shlq $63, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movq %rax, %xmm0
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
+; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: retq
;
-; AVX12-LABEL: ext_i2_2i64:
-; AVX12: # BB#0:
-; AVX12-NEXT: andb $3, %dil
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $62, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vmovq %rcx, %xmm0
-; AVX12-NEXT: shlq $63, %rax
-; AVX12-NEXT: sarq $63, %rax
-; AVX12-NEXT: vmovq %rax, %xmm1
-; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX12-NEXT: retq
+; AVX1-LABEL: ext_i2_2i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i2_2i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i2_2i64:
; AVX512: # BB#0:
@@ -58,50 +60,30 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
define <4 x i32> @ext_i4_4i32(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i32:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: andb $15, %dil
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $60, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $61, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $62, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shlq $63, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: retq
;
-; AVX12-LABEL: ext_i4_4i32:
-; AVX12: # BB#0:
-; AVX12-NEXT: andb $15, %dil
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $62, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: movq %rax, %rdx
-; AVX12-NEXT: shlq $63, %rdx
-; AVX12-NEXT: sarq $63, %rdx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $61, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shlq $60, %rax
-; AVX12-NEXT: sarq $63, %rax
-; AVX12-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: ext_i4_4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i4_4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i4_4i32:
; AVX512: # BB#0:
@@ -122,82 +104,32 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
define <8 x i16> @ext_i8_8i16(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i16:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shrq $7, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $57, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $58, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $59, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $60, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $61, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: shlq $62, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: shlq $63, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: retq
;
-; AVX12-LABEL: ext_i8_8i16:
-; AVX12: # BB#0:
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $62, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: movq %rax, %rdx
-; AVX12-NEXT: shlq $63, %rdx
-; AVX12-NEXT: sarq $63, %rdx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $61, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $60, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $59, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $58, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $57, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrq $7, %rax
-; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: ext_i8_8i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i8_8i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i16:
; AVX512: # BB#0:
@@ -210,191 +142,43 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
}
define <16 x i8> @ext_i16_16i8(i16 %a0) {
-; SSE2-SSSE3-LABEL: ext_i16_16i8:
-; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: pushq %rbp
-; SSE2-SSSE3-NEXT: .Lcfi0:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16
-; SSE2-SSSE3-NEXT: pushq %r15
-; SSE2-SSSE3-NEXT: .Lcfi1:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24
-; SSE2-SSSE3-NEXT: pushq %r14
-; SSE2-SSSE3-NEXT: .Lcfi2:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32
-; SSE2-SSSE3-NEXT: pushq %r13
-; SSE2-SSSE3-NEXT: .Lcfi3:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40
-; SSE2-SSSE3-NEXT: pushq %r12
-; SSE2-SSSE3-NEXT: .Lcfi4:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48
-; SSE2-SSSE3-NEXT: pushq %rbx
-; SSE2-SSSE3-NEXT: .Lcfi5:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56
-; SSE2-SSSE3-NEXT: .Lcfi6:
-; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56
-; SSE2-SSSE3-NEXT: .Lcfi7:
-; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48
-; SSE2-SSSE3-NEXT: .Lcfi8:
-; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40
-; SSE2-SSSE3-NEXT: .Lcfi9:
-; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32
-; SSE2-SSSE3-NEXT: .Lcfi10:
-; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24
-; SSE2-SSSE3-NEXT: .Lcfi11:
-; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rax
-; SSE2-SSSE3-NEXT: movq %rax, %r8
-; SSE2-SSSE3-NEXT: movq %rax, %r9
-; SSE2-SSSE3-NEXT: movq %rax, %r10
-; SSE2-SSSE3-NEXT: movq %rax, %r11
-; SSE2-SSSE3-NEXT: movq %rax, %r14
-; SSE2-SSSE3-NEXT: movq %rax, %r15
-; SSE2-SSSE3-NEXT: movq %rax, %r12
-; SSE2-SSSE3-NEXT: movq %rax, %r13
-; SSE2-SSSE3-NEXT: movq %rax, %rbx
-; SSE2-SSSE3-NEXT: movq %rax, %rcx
-; SSE2-SSSE3-NEXT: movq %rax, %rdx
-; SSE2-SSSE3-NEXT: movq %rax, %rsi
-; SSE2-SSSE3-NEXT: movq %rax, %rdi
-; SSE2-SSSE3-NEXT: movq %rax, %rbp
-; SSE2-SSSE3-NEXT: shrq $15, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm0
-; SSE2-SSSE3-NEXT: movq %rax, %rbp
-; SSE2-SSSE3-NEXT: movsbq %al, %rax
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm1
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm2
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm3
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-SSSE3-NEXT: shlq $61, %rbx
-; SSE2-SSSE3-NEXT: sarq $63, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: shlq $62, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: shlq $63, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: shlq $58, %rsi
-; SSE2-SSSE3-NEXT: sarq $63, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE2-SSSE3-NEXT: shlq $59, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm4
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; SSE2-SSSE3-NEXT: shlq $57, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm2
-; SSE2-SSSE3-NEXT: shrq $7, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: popq %rbx
-; SSE2-SSSE3-NEXT: popq %r12
-; SSE2-SSSE3-NEXT: popq %r13
-; SSE2-SSSE3-NEXT: popq %r14
-; SSE2-SSSE3-NEXT: popq %r15
-; SSE2-SSSE3-NEXT: popq %rbp
-; SSE2-SSSE3-NEXT: retq
+; SSE2-LABEL: ext_i16_16i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: ext_i16_16i8:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %edi, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pcmpeqb %xmm1, %xmm0
+; SSSE3-NEXT: retq
;
-; AVX12-LABEL: ext_i16_16i8:
-; AVX12: # BB#0:
-; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movswq -{{[0-9]+}}(%rsp), %rax
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $62, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: movq %rax, %rdx
-; AVX12-NEXT: shlq $63, %rdx
-; AVX12-NEXT: sarq $63, %rdx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $61, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $60, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $59, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $58, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $57, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movsbq %al, %rcx
-; AVX12-NEXT: shrq $7, %rcx
-; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $55, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $54, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $53, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $52, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $51, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $50, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movq %rax, %rcx
-; AVX12-NEXT: shlq $49, %rcx
-; AVX12-NEXT: sarq $63, %rcx
-; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrq $15, %rax
-; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: ext_i16_16i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i16_16i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i8:
; AVX512: # BB#0:
@@ -413,80 +197,47 @@ define <16 x i8> @ext_i16_16i8(i16 %a0)
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: andb $15, %dil
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm0
-; SSE2-SSSE3-NEXT: psrad $31, %xmm0
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm1
-; SSE2-SSSE3-NEXT: psrad $31, %xmm1
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
+; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [4,8]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i4_4i64:
; AVX1: # BB#0:
-; AVX1-NEXT: andb $15, %dil
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $60, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovq %rcx, %xmm0
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $61, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovq %rcx, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $62, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovq %rcx, %xmm1
-; AVX1-NEXT: shlq $63, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vmovq %rax, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i4_4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: andb $15, %dil
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $60, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm0
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $61, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $62, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: shlq $63, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vmovq %rax, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i4_4i64:
@@ -506,126 +257,40 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
define <8 x i32> @ext_i8_8i32(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i32:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm0
-; SSE2-SSSE3-NEXT: psrad $31, %xmm0
-; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm1
-; SSE2-SSSE3-NEXT: psrad $31, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm1
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $58, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rax, %rdx
-; AVX1-NEXT: shlq $59, %rdx
-; AVX1-NEXT: sarq $63, %rdx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $57, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shrq $7, %rcx
-; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $62, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rax, %rdx
-; AVX1-NEXT: shlq $63, %rdx
-; AVX1-NEXT: sarq $63, %rdx
-; AVX1-NEXT: vmovd %edx, %xmm1
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $61, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $60, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i32:
; AVX2: # BB#0:
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movsbq -{{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $58, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rax, %rdx
-; AVX2-NEXT: shlq $59, %rdx
-; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $57, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shrq $7, %rcx
-; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $62, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rax, %rdx
-; AVX2-NEXT: shlq $63, %rdx
-; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vmovd %edx, %xmm1
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $61, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $60, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i32:
@@ -642,300 +307,42 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
define <16 x i16> @ext_i16_16i16(i16 %a0) {
; SSE2-SSSE3-LABEL: ext_i16_16i16:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm0
-; SSE2-SSSE3-NEXT: psraw $15, %xmm0
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm1
-; SSE2-SSSE3-NEXT: psraw $15, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [256,512,1024,2048,4096,8192,16384,32768]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm2, %xmm1
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i16:
; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi0:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: .Lcfi1:
-; AVX1-NEXT: .cfi_def_cfa_offset 24
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: .Lcfi2:
-; AVX1-NEXT: .cfi_def_cfa_offset 32
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: .Lcfi3:
-; AVX1-NEXT: .cfi_def_cfa_offset 40
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: .Lcfi4:
-; AVX1-NEXT: .cfi_def_cfa_offset 48
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: .Lcfi5:
-; AVX1-NEXT: .cfi_def_cfa_offset 56
-; AVX1-NEXT: .Lcfi6:
-; AVX1-NEXT: .cfi_offset %rbx, -56
-; AVX1-NEXT: .Lcfi7:
-; AVX1-NEXT: .cfi_offset %r12, -48
-; AVX1-NEXT: .Lcfi8:
-; AVX1-NEXT: .cfi_offset %r13, -40
-; AVX1-NEXT: .Lcfi9:
-; AVX1-NEXT: .cfi_offset %r14, -32
-; AVX1-NEXT: .Lcfi10:
-; AVX1-NEXT: .cfi_offset %r15, -24
-; AVX1-NEXT: .Lcfi11:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movswq -{{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $55, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: movq %rax, %r8
-; AVX1-NEXT: movq %rax, %r10
-; AVX1-NEXT: movq %rax, %r11
-; AVX1-NEXT: movq %rax, %r14
-; AVX1-NEXT: movq %rax, %r15
-; AVX1-NEXT: movq %rax, %r9
-; AVX1-NEXT: movq %rax, %r12
-; AVX1-NEXT: movq %rax, %r13
-; AVX1-NEXT: movq %rax, %rbx
-; AVX1-NEXT: movq %rax, %rdi
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: movq %rax, %rdx
-; AVX1-NEXT: movq %rax, %rsi
-; AVX1-NEXT: movsbq %al, %rbp
-; AVX1-NEXT: shlq $54, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: shlq $53, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0
-; AVX1-NEXT: shlq $52, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0
-; AVX1-NEXT: shlq $51, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0
-; AVX1-NEXT: shlq $50, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0
-; AVX1-NEXT: shlq $49, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: shrq $15, %r9
-; AVX1-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: shlq $63, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vmovd %r13d, %xmm1
-; AVX1-NEXT: shlq $62, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $61, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $60, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $59, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $58, %rdx
-; AVX1-NEXT: sarq $63, %rdx
-; AVX1-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $57, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1
-; AVX1-NEXT: shrq $7, %rbp
-; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i16:
; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi0:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: .Lcfi1:
-; AVX2-NEXT: .cfi_def_cfa_offset 24
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: .Lcfi2:
-; AVX2-NEXT: .cfi_def_cfa_offset 32
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: .Lcfi3:
-; AVX2-NEXT: .cfi_def_cfa_offset 40
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: .Lcfi4:
-; AVX2-NEXT: .cfi_def_cfa_offset 48
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: .Lcfi5:
-; AVX2-NEXT: .cfi_def_cfa_offset 56
-; AVX2-NEXT: .Lcfi6:
-; AVX2-NEXT: .cfi_offset %rbx, -56
-; AVX2-NEXT: .Lcfi7:
-; AVX2-NEXT: .cfi_offset %r12, -48
-; AVX2-NEXT: .Lcfi8:
-; AVX2-NEXT: .cfi_offset %r13, -40
-; AVX2-NEXT: .Lcfi9:
-; AVX2-NEXT: .cfi_offset %r14, -32
-; AVX2-NEXT: .Lcfi10:
-; AVX2-NEXT: .cfi_offset %r15, -24
-; AVX2-NEXT: .Lcfi11:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movswq -{{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $55, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: movq %rax, %r8
-; AVX2-NEXT: movq %rax, %r10
-; AVX2-NEXT: movq %rax, %r11
-; AVX2-NEXT: movq %rax, %r14
-; AVX2-NEXT: movq %rax, %r15
-; AVX2-NEXT: movq %rax, %r9
-; AVX2-NEXT: movq %rax, %r12
-; AVX2-NEXT: movq %rax, %r13
-; AVX2-NEXT: movq %rax, %rbx
-; AVX2-NEXT: movq %rax, %rdi
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: movq %rax, %rdx
-; AVX2-NEXT: movq %rax, %rsi
-; AVX2-NEXT: movsbq %al, %rbp
-; AVX2-NEXT: shlq $54, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: shlq $53, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0
-; AVX2-NEXT: shlq $52, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0
-; AVX2-NEXT: shlq $51, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0
-; AVX2-NEXT: shlq $50, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0
-; AVX2-NEXT: shlq $49, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0
-; AVX2-NEXT: shrq $15, %r9
-; AVX2-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0
-; AVX2-NEXT: shlq $63, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vmovd %r13d, %xmm1
-; AVX2-NEXT: shlq $62, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $61, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $60, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $59, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $58, %rdx
-; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $57, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1
-; AVX2-NEXT: shrq $7, %rbp
-; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i16:
@@ -951,539 +358,51 @@ define <16 x i16> @ext_i16_16i16(i16 %a0
define <32 x i8> @ext_i32_32i8(i32 %a0) {
; SSE2-SSSE3-LABEL: ext_i32_32i8:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: pushq %rbp
-; SSE2-SSSE3-NEXT: .Lcfi12:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16
-; SSE2-SSSE3-NEXT: pushq %r15
-; SSE2-SSSE3-NEXT: .Lcfi13:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24
-; SSE2-SSSE3-NEXT: pushq %r14
-; SSE2-SSSE3-NEXT: .Lcfi14:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32
-; SSE2-SSSE3-NEXT: pushq %r13
-; SSE2-SSSE3-NEXT: .Lcfi15:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40
-; SSE2-SSSE3-NEXT: pushq %r12
-; SSE2-SSSE3-NEXT: .Lcfi16:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48
-; SSE2-SSSE3-NEXT: pushq %rbx
-; SSE2-SSSE3-NEXT: .Lcfi17:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56
-; SSE2-SSSE3-NEXT: .Lcfi18:
-; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56
-; SSE2-SSSE3-NEXT: .Lcfi19:
-; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48
-; SSE2-SSSE3-NEXT: .Lcfi20:
-; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40
-; SSE2-SSSE3-NEXT: .Lcfi21:
-; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32
-; SSE2-SSSE3-NEXT: .Lcfi22:
-; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24
-; SSE2-SSSE3-NEXT: .Lcfi23:
-; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: shrl $16, %edi
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rbx
-; SSE2-SSSE3-NEXT: movq %rbx, %r8
-; SSE2-SSSE3-NEXT: movq %rbx, %r9
-; SSE2-SSSE3-NEXT: movq %rbx, %r10
-; SSE2-SSSE3-NEXT: movq %rbx, %r11
-; SSE2-SSSE3-NEXT: movq %rbx, %r14
-; SSE2-SSSE3-NEXT: movq %rbx, %r15
-; SSE2-SSSE3-NEXT: movq %rbx, %r12
-; SSE2-SSSE3-NEXT: movq %rbx, %r13
-; SSE2-SSSE3-NEXT: movq %rbx, %rdi
-; SSE2-SSSE3-NEXT: movq %rbx, %rcx
-; SSE2-SSSE3-NEXT: movq %rbx, %rdx
-; SSE2-SSSE3-NEXT: movq %rbx, %rbp
-; SSE2-SSSE3-NEXT: movq %rbx, %rsi
-; SSE2-SSSE3-NEXT: movq %rbx, %rax
-; SSE2-SSSE3-NEXT: shrq $15, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: movq %rbx, %rax
-; SSE2-SSSE3-NEXT: movsbq %bl, %rbx
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm15
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm8
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm3
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm9
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm6
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm10
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm1
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm11
-; SSE2-SSSE3-NEXT: shlq $61, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm5
-; SSE2-SSSE3-NEXT: shlq $62, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm12
-; SSE2-SSSE3-NEXT: shlq $63, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm0
-; SSE2-SSSE3-NEXT: shlq $58, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm13
-; SSE2-SSSE3-NEXT: shlq $59, %rsi
-; SSE2-SSSE3-NEXT: sarq $63, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm7
-; SSE2-SSSE3-NEXT: shlq $57, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: shrq $7, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm14
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
-; SSE2-SSSE3-NEXT: movq %rsi, %r8
-; SSE2-SSSE3-NEXT: movq %rsi, %r9
-; SSE2-SSSE3-NEXT: movq %rsi, %r10
-; SSE2-SSSE3-NEXT: movq %rsi, %r11
-; SSE2-SSSE3-NEXT: movq %rsi, %r14
-; SSE2-SSSE3-NEXT: movq %rsi, %r15
-; SSE2-SSSE3-NEXT: movq %rsi, %r12
-; SSE2-SSSE3-NEXT: movq %rsi, %r13
-; SSE2-SSSE3-NEXT: movq %rsi, %rbx
-; SSE2-SSSE3-NEXT: movq %rsi, %rax
-; SSE2-SSSE3-NEXT: movq %rsi, %rcx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdi
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: shrq $15, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm2
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm3
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm4
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm5
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm4
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm6
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-SSSE3-NEXT: shlq $61, %rbx
-; SSE2-SSSE3-NEXT: sarq $63, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: shlq $62, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-SSSE3-NEXT: shlq $63, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE2-SSSE3-NEXT: shlq $58, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-SSSE3-NEXT: shlq $59, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE2-SSSE3-NEXT: shlq $57, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm4
-; SSE2-SSSE3-NEXT: shrq $7, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE2-SSSE3-NEXT: popq %rbx
-; SSE2-SSSE3-NEXT: popq %r12
-; SSE2-SSSE3-NEXT: popq %r13
-; SSE2-SSSE3-NEXT: popq %r14
-; SSE2-SSSE3-NEXT: popq %r15
-; SSE2-SSSE3-NEXT: popq %rbp
+; SSE2-SSSE3-NEXT: movd %edi, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,3,3,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm2, %xmm1
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i32_32i8:
; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi12:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi13:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi14:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: .Lcfi15:
-; AVX1-NEXT: .cfi_offset %rbx, -56
-; AVX1-NEXT: .Lcfi16:
-; AVX1-NEXT: .cfi_offset %r12, -48
-; AVX1-NEXT: .Lcfi17:
-; AVX1-NEXT: .cfi_offset %r13, -40
-; AVX1-NEXT: .Lcfi18:
-; AVX1-NEXT: .cfi_offset %r14, -32
-; AVX1-NEXT: .Lcfi19:
-; AVX1-NEXT: .cfi_offset %r15, -24
-; AVX1-NEXT: movl %edi, (%rsp)
-; AVX1-NEXT: movslq (%rsp), %rdx
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: shlq $47, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: movq %rdx, %r13
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: movq %rdx, %r11
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: movq %rdx, %rbx
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: movq %rdx, %r15
-; AVX1-NEXT: movq %rdx, %r12
-; AVX1-NEXT: movq %rdx, %rax
-; AVX1-NEXT: shlq $46, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX1-NEXT: shlq $45, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: shlq $44, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: shlq $43, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: shlq $42, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: shlq $41, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r13
-; AVX1-NEXT: shlq $40, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: shlq $39, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: shlq $38, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
-; AVX1-NEXT: movsbq %dl, %rax
-; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: shlq $37, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: shlq $36, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rbx
-; AVX1-NEXT: shlq $35, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: shlq $34, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r15
-; AVX1-NEXT: shlq $33, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r12
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX1-NEXT: shrq $31, %rax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rax
-; AVX1-NEXT: shlq $63, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vmovd %r8d, %xmm1
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movswq %dx, %rdx
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; AVX1-NEXT: shlq $62, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $61, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $60, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $59, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $58, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $57, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; AVX1-NEXT: shrq $7, %rcx
-; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $55, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $54, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $53, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $52, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $51, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $50, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shlq $49, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
-; AVX1-NEXT: shrq $15, %rdx
-; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: leaq -40(%rbp), %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i32_32i8:
; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi12:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi13:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi14:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: .Lcfi15:
-; AVX2-NEXT: .cfi_offset %rbx, -56
-; AVX2-NEXT: .Lcfi16:
-; AVX2-NEXT: .cfi_offset %r12, -48
-; AVX2-NEXT: .Lcfi17:
-; AVX2-NEXT: .cfi_offset %r13, -40
-; AVX2-NEXT: .Lcfi18:
-; AVX2-NEXT: .cfi_offset %r14, -32
-; AVX2-NEXT: .Lcfi19:
-; AVX2-NEXT: .cfi_offset %r15, -24
-; AVX2-NEXT: movl %edi, (%rsp)
-; AVX2-NEXT: movslq (%rsp), %rdx
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: shlq $47, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: movq %rdx, %r13
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: movq %rdx, %r11
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: movq %rdx, %rbx
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: movq %rdx, %r15
-; AVX2-NEXT: movq %rdx, %r12
-; AVX2-NEXT: movq %rdx, %rax
-; AVX2-NEXT: shlq $46, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: shlq $45, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: shlq $44, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: shlq $43, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: shlq $42, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: shlq $41, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r13
-; AVX2-NEXT: shlq $40, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: shlq $39, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: shlq $38, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
-; AVX2-NEXT: movsbq %dl, %rax
-; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: shlq $37, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: shlq $36, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rbx
-; AVX2-NEXT: shlq $35, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: shlq $34, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r15
-; AVX2-NEXT: shlq $33, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r12
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: shrq $31, %rax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rax
-; AVX2-NEXT: shlq $63, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vmovd %r8d, %xmm1
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movswq %dx, %rdx
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; AVX2-NEXT: shlq $62, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $61, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $60, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $59, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $58, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $57, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq $7, %rcx
-; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $55, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $54, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $53, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $52, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $51, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $50, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shlq $49, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
-; AVX2-NEXT: shrq $15, %rdx
-; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: leaq -40(%rbp), %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i32_32i8:
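The ext_i32_32i8 checks above show the new lowering at work: instead of extracting each bit with a shlq/sarq pair and reassembling the vector with pinsrb, the scalar is broadcast to every lane, ANDed with a per-lane power-of-two mask and compared for equality against that same mask (the AVX2 constant 9241421688590303745 is 0x8040201008040201, i.e. the byte mask [1,2,4,8,16,32,64,128] splatted across a quadword). A minimal sketch of the IR pattern being compiled; the exact test body is not part of this hunk, but given the all-ones compare results it is presumably the sext variant:

define <32 x i8> @ext_i32_32i8(i32 %a0) {
  %1 = bitcast i32 %a0 to <32 x i1>
  %2 = sext <32 x i1> %1 to <32 x i8>
  ret <32 x i8> %2
}
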
@@ -1503,159 +422,69 @@ define <32 x i8> @ext_i32_32i8(i32 %a0)
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm0
-; SSE2-SSSE3-NEXT: psrad $31, %xmm0
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm1
-; SSE2-SSSE3-NEXT: psrad $31, %xmm1
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm2
-; SSE2-SSSE3-NEXT: psrad $31, %xmm2
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: psllq $63, %xmm3
-; SSE2-SSSE3-NEXT: psrad $31, %xmm3
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [4,8]
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32]
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [64,128]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm4
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm3, %xmm4
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i64:
; AVX1: # BB#0:
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $5, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $6, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i64:
; AVX2: # BB#0:
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $3, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $4, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $5, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $6, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
+; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm2
+; AVX2-NEXT: vpcmpeqq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,32,64,128]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i64:
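The ext_i8_8i64 block above follows the same idiom, presumably from IR along these lines:

define <8 x i64> @ext_i8_8i64(i8 %a0) {
  %1 = bitcast i8 %a0 to <8 x i1>
  %2 = sext <8 x i1> %1 to <8 x i64>
  ret <8 x i64> %2
}

SSE2 has no 64-bit element compare (pcmpeqq is SSE4.1), so each vector equality is emulated with pcmpeqd plus a pshufd/pand that ANDs the swapped 32-bit halves together, which accounts for the shuffle after every compare in the SSE2-SSSE3 checks. The "# kill" annotations appear because the i8 argument is read as a full 64-bit register by movq/vmovq before being broadcast.
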
@@ -1671,262 +500,58 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
define <16 x i32> @ext_i16_16i32(i16 %a0) {
; SSE2-SSSE3-LABEL: ext_i16_16i32:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm0
-; SSE2-SSSE3-NEXT: psrad $31, %xmm0
-; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm1
-; SSE2-SSSE3-NEXT: psrad $31, %xmm1
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [256,512,1024,2048]
; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm2
-; SSE2-SSSE3-NEXT: psrad $31, %xmm2
-; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: pslld $31, %xmm3
-; SSE2-SSSE3-NEXT: psrad $31, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [4096,8192,16384,32768]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i32:
; AVX1: # BB#0:
-; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $5, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $6, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $7, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $9, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $10, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $11, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $12, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $13, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $14, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $15, %eax
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i32:
; AVX2: # BB#0:
-; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $3, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $4, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $5, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $6, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $7, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $9, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $10, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $11, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $12, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $13, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $14, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $15, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
-; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpslld $31, %ymm1, %ymm1
-; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i32:
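ext_i16_16i32 above completes the pattern for a 512-bit result; a sketch of the presumed source:

define <16 x i32> @ext_i16_16i32(i16 %a0) {
  %1 = bitcast i16 %a0 to <16 x i1>
  %2 = sext <16 x i1> %1 to <16 x i32>
  ret <16 x i32> %2
}

Sixteen i32 lanes span four xmm registers under SSE2, so the broadcast value is tested against four mask constants in turn ([1,2,4,8] through [4096,8192,16384,32768]); AVX2 halves that to two ymm compares.
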
@@ -1942,558 +567,65 @@ define <16 x i32> @ext_i16_16i32(i16 %a0
define <32 x i16> @ext_i32_32i16(i32 %a0) {
; SSE2-SSSE3-LABEL: ext_i32_32i16:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movl %edi, %eax
-; SSE2-SSSE3-NEXT: shrl $16, %eax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm2
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm0
-; SSE2-SSSE3-NEXT: psraw $15, %xmm0
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm1
-; SSE2-SSSE3-NEXT: psraw $15, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [256,512,1024,2048,4096,8192,16384,32768]
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm5, %xmm1
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,1,1,1,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm2
-; SSE2-SSSE3-NEXT: psraw $15, %xmm2
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-SSSE3-NEXT: psllw $15, %xmm3
-; SSE2-SSSE3-NEXT: psraw $15, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm5, %xmm3
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i32_32i16:
; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi20:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi21:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi22:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $128, %rsp
-; AVX1-NEXT: .Lcfi23:
-; AVX1-NEXT: .cfi_offset %rbx, -56
-; AVX1-NEXT: .Lcfi24:
-; AVX1-NEXT: .cfi_offset %r12, -48
-; AVX1-NEXT: .Lcfi25:
-; AVX1-NEXT: .cfi_offset %r13, -40
-; AVX1-NEXT: .Lcfi26:
-; AVX1-NEXT: .cfi_offset %r14, -32
-; AVX1-NEXT: .Lcfi27:
-; AVX1-NEXT: .cfi_offset %r15, -24
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, %r13d
-; AVX1-NEXT: movl %edi, %r12d
-; AVX1-NEXT: movl %edi, %r15d
-; AVX1-NEXT: movl %edi, %r14d
-; AVX1-NEXT: movl %edi, %ebx
-; AVX1-NEXT: movl %edi, %r11d
-; AVX1-NEXT: movl %edi, %r10d
-; AVX1-NEXT: movl %edi, %r9d
-; AVX1-NEXT: movl %edi, %r8d
-; AVX1-NEXT: movl %edi, %esi
-; AVX1-NEXT: movl %edi, %edx
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vmovd %edi, %xmm0
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $3, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $4, %esi
-; AVX1-NEXT: andl $1, %esi
-; AVX1-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
-; AVX1-NEXT: shrl $5, %r8d
-; AVX1-NEXT: andl $1, %r8d
-; AVX1-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $6, %r9d
-; AVX1-NEXT: andl $1, %r9d
-; AVX1-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $7, %r10d
-; AVX1-NEXT: andl $1, %r10d
-; AVX1-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $8, %r11d
-; AVX1-NEXT: andl $1, %r11d
-; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $9, %ebx
-; AVX1-NEXT: andl $1, %ebx
-; AVX1-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $10, %r14d
-; AVX1-NEXT: andl $1, %r14d
-; AVX1-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $11, %r15d
-; AVX1-NEXT: andl $1, %r15d
-; AVX1-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $12, %r12d
-; AVX1-NEXT: andl $1, %r12d
-; AVX1-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $13, %r13d
-; AVX1-NEXT: andl $1, %r13d
-; AVX1-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $15, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $16, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2
-; AVX1-NEXT: vpsraw $15, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpsllw $15, %xmm2, %xmm2
-; AVX1-NEXT: vpsraw $15, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1
-; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: leaq -40(%rbp), %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpeqw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i32_32i16:
; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi20:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi21:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi22:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $128, %rsp
-; AVX2-NEXT: .Lcfi23:
-; AVX2-NEXT: .cfi_offset %rbx, -56
-; AVX2-NEXT: .Lcfi24:
-; AVX2-NEXT: .cfi_offset %r12, -48
-; AVX2-NEXT: .Lcfi25:
-; AVX2-NEXT: .cfi_offset %r13, -40
-; AVX2-NEXT: .Lcfi26:
-; AVX2-NEXT: .cfi_offset %r14, -32
-; AVX2-NEXT: .Lcfi27:
-; AVX2-NEXT: .cfi_offset %r15, -24
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, %r13d
-; AVX2-NEXT: movl %edi, %r12d
-; AVX2-NEXT: movl %edi, %r15d
-; AVX2-NEXT: movl %edi, %r14d
-; AVX2-NEXT: movl %edi, %ebx
-; AVX2-NEXT: movl %edi, %r11d
-; AVX2-NEXT: movl %edi, %r10d
-; AVX2-NEXT: movl %edi, %r9d
-; AVX2-NEXT: movl %edi, %r8d
-; AVX2-NEXT: movl %edi, %esi
-; AVX2-NEXT: movl %edi, %edx
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: andl $1, %edi
; AVX2-NEXT: vmovd %edi, %xmm0
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $3, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $4, %esi
-; AVX2-NEXT: andl $1, %esi
-; AVX2-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
-; AVX2-NEXT: shrl $5, %r8d
-; AVX2-NEXT: andl $1, %r8d
-; AVX2-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $6, %r9d
-; AVX2-NEXT: andl $1, %r9d
-; AVX2-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $7, %r10d
-; AVX2-NEXT: andl $1, %r10d
-; AVX2-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $8, %r11d
-; AVX2-NEXT: andl $1, %r11d
-; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $9, %ebx
-; AVX2-NEXT: andl $1, %ebx
-; AVX2-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $10, %r14d
-; AVX2-NEXT: andl $1, %r14d
-; AVX2-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $11, %r15d
-; AVX2-NEXT: andl $1, %r15d
-; AVX2-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $12, %r12d
-; AVX2-NEXT: andl $1, %r12d
-; AVX2-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $13, %r13d
-; AVX2-NEXT: andl $1, %r13d
-; AVX2-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $15, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $16, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpsllw $15, %ymm0, %ymm0
-; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpsllw $15, %ymm1, %ymm1
-; AVX2-NEXT: vpsraw $15, %ymm1, %ymm1
-; AVX2-NEXT: leaq -40(%rbp), %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: shrl $16, %edi
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i32_32i16:
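For reference, ext_i32_32i16 above presumably follows the common shape of this test file, a scalar-to-bool-vector bitcast followed by a sign extension; the body is outside the hunk context, so this is a sketch:

  define <32 x i16> @ext_i32_32i16(i32 %a0) {
    %1 = bitcast i32 %a0 to <32 x i1>
    %2 = sext <32 x i1> %1 to <32 x i16>
    ret <32 x i16> %2
  }

The new lowering splats the scalar into every lane, ANDs with the per-lane power-of-two mask [1,2,4,...,32768], and turns each bit test into a full-width element compare (equality against the mask on AVX2, equality against zero plus a vpxor invert on AVX1). A set bit yields an all-ones lane, which is exactly the sign-extended i1, so the old expansion that extracted each bit with scalar shr/and and rebuilt the vector one vpinsrb at a time goes away.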
@@ -2509,967 +641,79 @@ define <32 x i16> @ext_i32_32i16(i32 %a0
define <64 x i8> @ext_i64_64i8(i64 %a0) {
; SSE2-SSSE3-LABEL: ext_i64_64i8:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: pushq %rbp
-; SSE2-SSSE3-NEXT: .Lcfi24:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 16
-; SSE2-SSSE3-NEXT: pushq %r15
-; SSE2-SSSE3-NEXT: .Lcfi25:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 24
-; SSE2-SSSE3-NEXT: pushq %r14
-; SSE2-SSSE3-NEXT: .Lcfi26:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 32
-; SSE2-SSSE3-NEXT: pushq %r13
-; SSE2-SSSE3-NEXT: .Lcfi27:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 40
-; SSE2-SSSE3-NEXT: pushq %r12
-; SSE2-SSSE3-NEXT: .Lcfi28:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 48
-; SSE2-SSSE3-NEXT: pushq %rbx
-; SSE2-SSSE3-NEXT: .Lcfi29:
-; SSE2-SSSE3-NEXT: .cfi_def_cfa_offset 56
-; SSE2-SSSE3-NEXT: .Lcfi30:
-; SSE2-SSSE3-NEXT: .cfi_offset %rbx, -56
-; SSE2-SSSE3-NEXT: .Lcfi31:
-; SSE2-SSSE3-NEXT: .cfi_offset %r12, -48
-; SSE2-SSSE3-NEXT: .Lcfi32:
-; SSE2-SSSE3-NEXT: .cfi_offset %r13, -40
-; SSE2-SSSE3-NEXT: .Lcfi33:
-; SSE2-SSSE3-NEXT: .cfi_offset %r14, -32
-; SSE2-SSSE3-NEXT: .Lcfi34:
-; SSE2-SSSE3-NEXT: .cfi_offset %r15, -24
-; SSE2-SSSE3-NEXT: .Lcfi35:
-; SSE2-SSSE3-NEXT: .cfi_offset %rbp, -16
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movq %rdi, %rax
-; SSE2-SSSE3-NEXT: shrq $32, %rax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movq %rdi, %rax
-; SSE2-SSSE3-NEXT: shrq $48, %rax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: shrl $16, %edi
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rbx
-; SSE2-SSSE3-NEXT: movq %rbx, %r8
-; SSE2-SSSE3-NEXT: movq %rbx, %r9
-; SSE2-SSSE3-NEXT: movq %rbx, %r10
-; SSE2-SSSE3-NEXT: movq %rbx, %r11
-; SSE2-SSSE3-NEXT: movq %rbx, %r14
-; SSE2-SSSE3-NEXT: movq %rbx, %r15
-; SSE2-SSSE3-NEXT: movq %rbx, %r12
-; SSE2-SSSE3-NEXT: movq %rbx, %r13
-; SSE2-SSSE3-NEXT: movq %rbx, %rdi
-; SSE2-SSSE3-NEXT: movq %rbx, %rcx
-; SSE2-SSSE3-NEXT: movq %rbx, %rdx
-; SSE2-SSSE3-NEXT: movq %rbx, %rsi
-; SSE2-SSSE3-NEXT: movq %rbx, %rbp
-; SSE2-SSSE3-NEXT: movq %rbx, %rax
-; SSE2-SSSE3-NEXT: shrq $15, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: movq %rbx, %rax
-; SSE2-SSSE3-NEXT: movsbq %bl, %rbx
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm15
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm8
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm2
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm9
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm6
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm10
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm11
-; SSE2-SSSE3-NEXT: shlq $61, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm5
-; SSE2-SSSE3-NEXT: shlq $62, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm12
-; SSE2-SSSE3-NEXT: shlq $63, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm0
-; SSE2-SSSE3-NEXT: shlq $58, %rsi
-; SSE2-SSSE3-NEXT: sarq $63, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm13
-; SSE2-SSSE3-NEXT: shlq $59, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm7
-; SSE2-SSSE3-NEXT: shlq $57, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: shrq $7, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm14
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
-; SSE2-SSSE3-NEXT: movq %rsi, %r8
-; SSE2-SSSE3-NEXT: movq %rsi, %r9
-; SSE2-SSSE3-NEXT: movq %rsi, %r10
-; SSE2-SSSE3-NEXT: movq %rsi, %r11
-; SSE2-SSSE3-NEXT: movq %rsi, %r14
-; SSE2-SSSE3-NEXT: movq %rsi, %r15
-; SSE2-SSSE3-NEXT: movq %rsi, %r12
-; SSE2-SSSE3-NEXT: movq %rsi, %r13
-; SSE2-SSSE3-NEXT: movq %rsi, %rbx
-; SSE2-SSSE3-NEXT: movq %rsi, %rax
-; SSE2-SSSE3-NEXT: movq %rsi, %rcx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdi
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: shrq $15, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm1
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3],xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm13
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3],xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm1
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm8
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm15
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm9
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm10
-; SSE2-SSSE3-NEXT: shlq $61, %rbx
-; SSE2-SSSE3-NEXT: sarq $63, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm7
-; SSE2-SSSE3-NEXT: shlq $62, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm11
-; SSE2-SSSE3-NEXT: shlq $63, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shlq $58, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm12
-; SSE2-SSSE3-NEXT: shlq $59, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm5
-; SSE2-SSSE3-NEXT: shlq $57, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm1
-; SSE2-SSSE3-NEXT: shrq $7, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm14
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
-; SSE2-SSSE3-NEXT: movq %rsi, %r8
-; SSE2-SSSE3-NEXT: movq %rsi, %r9
-; SSE2-SSSE3-NEXT: movq %rsi, %r10
-; SSE2-SSSE3-NEXT: movq %rsi, %r11
-; SSE2-SSSE3-NEXT: movq %rsi, %r14
-; SSE2-SSSE3-NEXT: movq %rsi, %r15
-; SSE2-SSSE3-NEXT: movq %rsi, %r12
-; SSE2-SSSE3-NEXT: movq %rsi, %r13
-; SSE2-SSSE3-NEXT: movq %rsi, %rbx
-; SSE2-SSSE3-NEXT: movq %rsi, %rax
-; SSE2-SSSE3-NEXT: movq %rsi, %rcx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdi
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: shrq $15, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm6
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3],xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm3
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm8
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm13
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm9
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm1
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm10
-; SSE2-SSSE3-NEXT: shlq $61, %rbx
-; SSE2-SSSE3-NEXT: sarq $63, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm15
-; SSE2-SSSE3-NEXT: shlq $62, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm11
-; SSE2-SSSE3-NEXT: shlq $63, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: shlq $58, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm12
-; SSE2-SSSE3-NEXT: shlq $59, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm5
-; SSE2-SSSE3-NEXT: shlq $57, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm6
-; SSE2-SSSE3-NEXT: shrq $7, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm14
-; SSE2-SSSE3-NEXT: movswq -{{[0-9]+}}(%rsp), %rsi
-; SSE2-SSSE3-NEXT: movq %rsi, %r8
-; SSE2-SSSE3-NEXT: movq %rsi, %r9
-; SSE2-SSSE3-NEXT: movq %rsi, %r10
-; SSE2-SSSE3-NEXT: movq %rsi, %r11
-; SSE2-SSSE3-NEXT: movq %rsi, %r14
-; SSE2-SSSE3-NEXT: movq %rsi, %r15
-; SSE2-SSSE3-NEXT: movq %rsi, %r12
-; SSE2-SSSE3-NEXT: movq %rsi, %r13
-; SSE2-SSSE3-NEXT: movq %rsi, %rbx
-; SSE2-SSSE3-NEXT: movq %rsi, %rax
-; SSE2-SSSE3-NEXT: movq %rsi, %rcx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdx
-; SSE2-SSSE3-NEXT: movq %rsi, %rdi
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: shrq $15, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm7
-; SSE2-SSSE3-NEXT: movq %rsi, %rbp
-; SSE2-SSSE3-NEXT: movsbq %sil, %rsi
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1],xmm3[2],xmm11[2],xmm3[3],xmm11[3],xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3],xmm6[4],xmm14[4],xmm6[5],xmm14[5],xmm6[6],xmm14[6],xmm6[7],xmm14[7]
-; SSE2-SSSE3-NEXT: shlq $49, %r8
-; SSE2-SSSE3-NEXT: sarq $63, %r8
-; SSE2-SSSE3-NEXT: movd %r8d, %xmm4
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; SSE2-SSSE3-NEXT: shlq $50, %r9
-; SSE2-SSSE3-NEXT: sarq $63, %r9
-; SSE2-SSSE3-NEXT: movd %r9d, %xmm6
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-SSSE3-NEXT: shlq $51, %r10
-; SSE2-SSSE3-NEXT: sarq $63, %r10
-; SSE2-SSSE3-NEXT: movd %r10d, %xmm5
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSE2-SSSE3-NEXT: shlq $52, %r11
-; SSE2-SSSE3-NEXT: sarq $63, %r11
-; SSE2-SSSE3-NEXT: movd %r11d, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; SSE2-SSSE3-NEXT: shlq $53, %r14
-; SSE2-SSSE3-NEXT: sarq $63, %r14
-; SSE2-SSSE3-NEXT: movd %r14d, %xmm7
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSE2-SSSE3-NEXT: shlq $54, %r15
-; SSE2-SSSE3-NEXT: sarq $63, %r15
-; SSE2-SSSE3-NEXT: movd %r15d, %xmm6
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-SSSE3-NEXT: shlq $55, %r12
-; SSE2-SSSE3-NEXT: sarq $63, %r12
-; SSE2-SSSE3-NEXT: movd %r12d, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3],xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; SSE2-SSSE3-NEXT: shlq $60, %r13
-; SSE2-SSSE3-NEXT: sarq $63, %r13
-; SSE2-SSSE3-NEXT: movd %r13d, %xmm8
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE2-SSSE3-NEXT: shlq $61, %rbx
-; SSE2-SSSE3-NEXT: sarq $63, %rbx
-; SSE2-SSSE3-NEXT: movd %ebx, %xmm6
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; SSE2-SSSE3-NEXT: shlq $62, %rax
-; SSE2-SSSE3-NEXT: sarq $63, %rax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm7
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE2-SSSE3-NEXT: shlq $63, %rcx
-; SSE2-SSSE3-NEXT: sarq $63, %rcx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
-; SSE2-SSSE3-NEXT: shlq $58, %rdx
-; SSE2-SSSE3-NEXT: sarq $63, %rdx
-; SSE2-SSSE3-NEXT: movd %edx, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
-; SSE2-SSSE3-NEXT: shlq $59, %rdi
-; SSE2-SSSE3-NEXT: sarq $63, %rdi
-; SSE2-SSSE3-NEXT: movd %edi, %xmm7
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSE2-SSSE3-NEXT: shlq $57, %rbp
-; SSE2-SSSE3-NEXT: sarq $63, %rbp
-; SSE2-SSSE3-NEXT: movd %ebp, %xmm5
-; SSE2-SSSE3-NEXT: shrq $7, %rsi
-; SSE2-SSSE3-NEXT: movd %esi, %xmm6
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; SSE2-SSSE3-NEXT: popq %rbx
-; SSE2-SSSE3-NEXT: popq %r12
-; SSE2-SSSE3-NEXT: popq %r13
-; SSE2-SSSE3-NEXT: popq %r14
-; SSE2-SSSE3-NEXT: popq %r15
-; SSE2-SSSE3-NEXT: popq %rbp
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,1,1,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,2,3,3,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,4,5,5]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,7,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm3
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i64_64i8:
; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi28:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi29:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi30:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $128, %rsp
-; AVX1-NEXT: .Lcfi31:
-; AVX1-NEXT: .cfi_offset %rbx, -56
-; AVX1-NEXT: .Lcfi32:
-; AVX1-NEXT: .cfi_offset %r12, -48
-; AVX1-NEXT: .Lcfi33:
-; AVX1-NEXT: .cfi_offset %r13, -40
-; AVX1-NEXT: .Lcfi34:
-; AVX1-NEXT: .cfi_offset %r14, -32
-; AVX1-NEXT: .Lcfi35:
-; AVX1-NEXT: .cfi_offset %r15, -24
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp)
-; AVX1-NEXT: shrq $32, %rdi
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp)
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: shlq $47, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: movq %rdx, %r13
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: movq %rdx, %r11
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: movq %rdx, %rbx
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: movq %rdx, %r15
-; AVX1-NEXT: movq %rdx, %r12
-; AVX1-NEXT: movq %rdx, %rax
-; AVX1-NEXT: shlq $46, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX1-NEXT: shlq $45, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: shlq $44, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: shlq $43, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: shlq $42, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: shlq $41, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r13
-; AVX1-NEXT: shlq $40, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: shlq $39, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: shlq $38, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
-; AVX1-NEXT: movsbq %dl, %rax
-; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: shlq $37, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: shlq $36, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rbx
-; AVX1-NEXT: shlq $35, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: shlq $34, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r15
-; AVX1-NEXT: shlq $33, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %r12
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX1-NEXT: shrq $31, %rax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movq %rdx, %rax
-; AVX1-NEXT: shlq $63, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vmovd %r8d, %xmm1
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movswq %dx, %rdx
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; AVX1-NEXT: shlq $62, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $61, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $60, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $59, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $58, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $57, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; AVX1-NEXT: shrq $7, %rcx
-; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $55, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $54, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $53, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $52, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $51, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $50, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shlq $49, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
-; AVX1-NEXT: shrq $15, %rdx
-; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: shlq $47, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: movq %rdx, %r13
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: movq %rdx, %r12
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: movq %rdx, %rbx
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: movq %rdx, %r11
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: movq %rdx, %r15
-; AVX1-NEXT: movq %rdx, %rax
-; AVX1-NEXT: shlq $46, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: shlq $45, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpinsrb $2, %r13d, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %r13
-; AVX1-NEXT: shlq $44, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: shlq $43, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %r9
-; AVX1-NEXT: shlq $42, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrb $5, %r12d, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %r12
-; AVX1-NEXT: shlq $41, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %rdi
-; AVX1-NEXT: shlq $40, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrb $7, %ebx, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %rbx
-; AVX1-NEXT: shlq $39, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrb $8, %r8d, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: shlq $38, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrb $9, %r10d, %xmm2, %xmm2
-; AVX1-NEXT: movsbq %dl, %rax
-; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX1-NEXT: shlq $37, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: shlq $36, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrb $11, %esi, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %rsi
-; AVX1-NEXT: shlq $35, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrb $12, %r11d, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %r11
-; AVX1-NEXT: shlq $34, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrb $13, %r14d, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: shlq $33, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrb $14, %r15d, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %r15
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX1-NEXT: shrq $31, %rax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdx, %rax
-; AVX1-NEXT: shlq $63, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vmovd %ecx, %xmm3
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: movswq %dx, %rdx
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: shlq $62, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpinsrb $1, %r13d, %xmm3, %xmm1
-; AVX1-NEXT: shlq $61, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $60, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpinsrb $3, %r12d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $59, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $58, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpinsrb $5, %ebx, %xmm1, %xmm1
-; AVX1-NEXT: shlq $57, %r8
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpinsrb $6, %r8d, %xmm1, %xmm1
-; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; AVX1-NEXT: shrq $7, %rdi
-; AVX1-NEXT: vpinsrb $7, %edi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $55, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpinsrb $8, %r10d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $54, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1
-; AVX1-NEXT: shlq $53, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpinsrb $10, %r11d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $52, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpinsrb $11, %r14d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $51, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vpinsrb $12, %r15d, %xmm1, %xmm1
-; AVX1-NEXT: shlq $50, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shlq $49, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shrq $15, %rdx
-; AVX1-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpeqb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,5,5]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,7,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT: leaq -40(%rbp), %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i64_64i8:
; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi28:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi29:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi30:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $128, %rsp
-; AVX2-NEXT: .Lcfi31:
-; AVX2-NEXT: .cfi_offset %rbx, -56
-; AVX2-NEXT: .Lcfi32:
-; AVX2-NEXT: .cfi_offset %r12, -48
-; AVX2-NEXT: .Lcfi33:
-; AVX2-NEXT: .cfi_offset %r13, -40
-; AVX2-NEXT: .Lcfi34:
-; AVX2-NEXT: .cfi_offset %r14, -32
-; AVX2-NEXT: .Lcfi35:
-; AVX2-NEXT: .cfi_offset %r15, -24
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: shrq $32, %rdi
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: shlq $47, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: movq %rdx, %r13
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: movq %rdx, %r11
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: movq %rdx, %rbx
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: movq %rdx, %r15
-; AVX2-NEXT: movq %rdx, %r12
-; AVX2-NEXT: movq %rdx, %rax
-; AVX2-NEXT: shlq $46, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: shlq $45, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: shlq $44, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: shlq $43, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: shlq $42, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: shlq $41, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r13
-; AVX2-NEXT: shlq $40, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: shlq $39, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: shlq $38, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
-; AVX2-NEXT: movsbq %dl, %rax
-; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: shlq $37, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: shlq $36, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rbx
-; AVX2-NEXT: shlq $35, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: shlq $34, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r15
-; AVX2-NEXT: shlq $33, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %r12
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: shrq $31, %rax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movq %rdx, %rax
-; AVX2-NEXT: shlq $63, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vmovd %r8d, %xmm1
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movswq %dx, %rdx
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; AVX2-NEXT: shlq $62, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrb $1, %r11d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $61, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $60, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $59, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $58, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $57, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq $7, %rcx
-; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $55, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $54, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $53, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $52, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $51, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $50, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shlq $49, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrb $14, %r8d, %xmm1, %xmm1
-; AVX2-NEXT: shrq $15, %rdx
-; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: shlq $47, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovd %ecx, %xmm2
-; AVX2-NEXT: movq %rdx, %r13
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: movq %rdx, %r12
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: movq %rdx, %rbx
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: movq %rdx, %r11
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: movq %rdx, %r15
-; AVX2-NEXT: movq %rdx, %rax
-; AVX2-NEXT: shlq $46, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: shlq $45, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpinsrb $2, %r13d, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %r13
-; AVX2-NEXT: shlq $44, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: shlq $43, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpinsrb $4, %r9d, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %r9
-; AVX2-NEXT: shlq $42, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrb $5, %r12d, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %r12
-; AVX2-NEXT: shlq $41, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %rdi
-; AVX2-NEXT: shlq $40, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrb $7, %ebx, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %rbx
-; AVX2-NEXT: shlq $39, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrb $8, %r8d, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: shlq $38, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrb $9, %r10d, %xmm2, %xmm2
-; AVX2-NEXT: movsbq %dl, %rax
-; AVX2-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: shlq $37, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: shlq $36, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrb $11, %esi, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %rsi
-; AVX2-NEXT: shlq $35, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrb $12, %r11d, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %r11
-; AVX2-NEXT: shlq $34, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrb $13, %r14d, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: shlq $33, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrb $14, %r15d, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %r15
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: shrq $31, %rax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdx, %rax
-; AVX2-NEXT: shlq $63, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vmovd %ecx, %xmm3
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: movswq %dx, %rdx
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: shlq $62, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpinsrb $1, %r13d, %xmm3, %xmm1
-; AVX2-NEXT: shlq $61, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpinsrb $2, %r9d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $60, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpinsrb $3, %r12d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $59, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $58, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpinsrb $5, %ebx, %xmm1, %xmm1
-; AVX2-NEXT: shlq $57, %r8
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpinsrb $6, %r8d, %xmm1, %xmm1
-; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; AVX2-NEXT: shrq $7, %rdi
-; AVX2-NEXT: vpinsrb $7, %edi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $55, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpinsrb $8, %r10d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $54, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpinsrb $9, %esi, %xmm1, %xmm1
-; AVX2-NEXT: shlq $53, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpinsrb $10, %r11d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $52, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpinsrb $11, %r14d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $51, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vpinsrb $12, %r15d, %xmm1, %xmm1
-; AVX2-NEXT: shlq $50, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shlq $49, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shrq $15, %rdx
-; AVX2-NEXT: vpinsrb $15, %edx, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: leaq -40(%rbp), %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,5,5]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX2-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,7,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i64_64i8:
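In the AVX2 ext_i64_64i8 checks above, the bit-select mask is the splatted quadword 9241421688590303745, i.e. 0x8040201008040201, whose little-endian bytes are 1,2,4,8,16,32,64,128: each byte lane of the splatted input is tested against one bit of its corresponding source byte. The hypothetical globals below (not part of the test file) just spell out that equivalence in IR:

  ; the per-byte masks, and the same bits viewed as one i64
  @mask_bytes = constant <8 x i8> <i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128>
  @mask_qword = constant i64 9241421688590303745   ; 0x8040201008040201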
Modified: llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll?rev=314076&r1=314075&r2=314076&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll Sun Sep 24 06:42:31 2017
@@ -12,31 +12,38 @@
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: andb $3, %dil
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movq %rax, %xmm1
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
+; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
; SSE2-SSSE3-NEXT: retq
;
-; AVX12-LABEL: ext_i2_2i64:
-; AVX12: # BB#0:
-; AVX12-NEXT: andb $3, %dil
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vmovq %rcx, %xmm0
-; AVX12-NEXT: shrl %eax
-; AVX12-NEXT: andl $1, %eax
-; AVX12-NEXT: vmovq %rax, %xmm1
-; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX12-NEXT: retq
+; AVX1-LABEL: ext_i2_2i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i2_2i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i2_2i64:
; AVX512: # BB#0:
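The zext tests use the same splat/AND/compare idiom plus one extra step: a logical right shift by element-width-minus-one (psrlq $63 above) collapses the all-ones compare result to the 0/1 a zero extension must produce. Two details worth noting in the SSE2 block: SSE2 has no 64-bit element equality, so pcmpeqd plus the pshufd/pand pair combines the two 32-bit halves into a qword compare, and the "# kill" comment is just codegen noting that the i2 argument arrives in %edi but is then read as the full %rdi for the vector move. The IR under test is presumably:

  define <2 x i64> @ext_i2_2i64(i2 %a0) {
    %1 = bitcast i2 %a0 to <2 x i1>
    %2 = zext <2 x i1> %1 to <2 x i64>
    ret <2 x i64> %2
  }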
@@ -56,57 +63,32 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
define <4 x i32> @ext_i4_4i32(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i32:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: andb $15, %dil
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrld $31, %xmm0
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i4_4i32:
; AVX1: # BB#0:
-; AVX1-NEXT: andb $15, %dil
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i4_4i32:
; AVX2: # BB#0:
-; AVX2-NEXT: andb $15, %dil
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i4_4i32:
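Note that the old ext_i4_4i32 sequences had to clamp the argument with andb $15 before spilling it and shifting the bits out one by one; the new sequences need no scalar masking at all, since the vector AND with [1,2,4,8] only ever reads bits 0-3, and psrld $31 then turns the compare mask into the 0/1 zext result. A sketch of the IR, assuming the usual pattern:

  define <4 x i32> @ext_i4_4i32(i4 %a0) {
    %1 = bitcast i4 %a0 to <4 x i1>
    %2 = zext <4 x i1> %1 to <4 x i32>
    ret <4 x i32> %2
  }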
@@ -127,82 +109,35 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
define <8 x i16> @ext_i8_8i16(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i16:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
; SSE2-SSSE3-NEXT: retq
;
-; AVX12-LABEL: ext_i8_8i16:
-; AVX12: # BB#0:
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: movl %eax, %edx
-; AVX12-NEXT: andl $1, %edx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $2, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $3, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $4, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $5, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $6, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrl $7, %eax
-; AVX12-NEXT: movzwl %ax, %eax
-; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: ext_i8_8i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i8_8i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i16:
; AVX512: # BB#0:
@@ -254,156 +189,51 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
}
define <16 x i8> @ext_i16_16i8(i16 %a0) {
-; SSE2-SSSE3-LABEL: ext_i16_16i8:
-; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: retq
+; SSE2-LABEL: ext_i16_16i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE2-NEXT: psrlw $7, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: ext_i16_16i8:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %edi, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pcmpeqb %xmm1, %xmm0
+; SSSE3-NEXT: psrlw $7, %xmm0
+; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
;
-; AVX12-LABEL: ext_i16_16i8:
-; AVX12: # BB#0:
-; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: movl %eax, %edx
-; AVX12-NEXT: andl $1, %edx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $2, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $3, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $4, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $5, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $6, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $7, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $8, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $9, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $10, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $11, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $12, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $13, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $14, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrl $15, %eax
-; AVX12-NEXT: movzwl %ax, %eax
-; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: ext_i16_16i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ext_i16_16i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i8:
; AVX512: # BB#0:
@@ -521,75 +351,52 @@ define <16 x i8> @ext_i16_16i8(i16 %a0)
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: andb $15, %dil
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
-; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
-; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
-; SSE2-SSSE3-NEXT: pand %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
+; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [4,8]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm1
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i4_4i64:
; AVX1: # BB#0:
-; AVX1-NEXT: andb $15, %dil
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovq %rcx, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovq %rcx, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovq %rcx, %xmm1
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vmovq %rax, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $63, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i4_4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: andb $15, %dil
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $3, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovq %rcx, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vmovq %rax, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $63, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i4_4i64:
@@ -609,110 +416,45 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
define <8 x i32> @ext_i8_8i32(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i32:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
-; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: psrld $31, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: psrld $31, %xmm1
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $5, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: shrl $4, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $6, %ecx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $7, %ecx
-; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $31, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i32:
; AVX2: # BB#0:
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $5, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: shrl $4, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $6, %ecx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $7, %ecx
-; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i32:
@@ -728,229 +470,47 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
define <16 x i16> @ext_i16_16i16(i16 %a0) {
; SSE2-SSSE3-LABEL: ext_i16_16i16:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1]
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [256,512,1024,2048,4096,8192,16384,32768]
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm1
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i16:
; AVX1: # BB#0:
-; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $9, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: shrl $8, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $10, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $11, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $12, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $13, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $14, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $15, %ecx
-; AVX1-NEXT: movzwl %cx, %ecx
-; AVX1-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vmovd %edx, %xmm1
-; AVX1-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $5, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $6, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $15, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i16:
; AVX2: # BB#0:
-; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $9, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: shrl $8, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $10, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $11, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $12, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $13, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $14, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $15, %ecx
-; AVX2-NEXT: movzwl %cx, %ecx
-; AVX2-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vmovd %edx, %xmm1
-; AVX2-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $3, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $4, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $5, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $5, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $6, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $6, %ecx, %xmm1, %xmm1
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i16:
@@ -966,449 +526,63 @@ define <16 x i16> @ext_i16_16i16(i16 %a0
define <32 x i8> @ext_i32_32i8(i32 %a0) {
; SSE2-SSSE3-LABEL: ext_i32_32i8:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: shrl $16, %edi
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm1
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,3,3,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm1
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i32_32i8:
; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi0:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi1:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi2:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $4, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $5, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $6, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $8, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $9, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $10, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $11, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $12, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $13, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shrl $15, %edi
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i32_32i8:
; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi0:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi1:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi2:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrl $16, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm1
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $2, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $4, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $5, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $6, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $8, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $9, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $10, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $11, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $12, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $13, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shrl $15, %edi
-; AVX2-NEXT: andl $1, %edi
-; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i32_32i8:
@@ -1428,148 +602,79 @@ define <32 x i8> @ext_i32_32i8(i32 %a0)
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
-; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
-; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
-; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
+; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [4,8]
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm1
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32]
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm2
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [64,128]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm4
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm3, %xmm4
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,0,3,2]
; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm3
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i8_8i64:
; AVX1: # BB#0:
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $5, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $6, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1]
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $63, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlq $63, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $63, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i8_8i64:
; AVX2: # BB#0:
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $3, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $4, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $5, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $6, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1]
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
+; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm2
+; AVX2-NEXT: vpcmpeqq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrlq $63, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,32,64,128]
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $63, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i8_8i64:
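For reference, the IR these CHECK lines cover isn't shown in the diff; going by the test file and function name, ext_i8_8i64 presumably bitcasts the scalar to a bool vector and zero-extends it, roughly:

define <8 x i64> @ext_i8_8i64(i8 %a0) {
  %1 = bitcast i8 %a0 to <8 x i1>
  %2 = zext <8 x i1> %1 to <8 x i64>
  ret <8 x i64> %2
}

The new lowering broadcasts the scalar, ANDs each 64-bit lane with its power-of-two mask ([1,2], [4,8], ...), compares for equality against the same mask, and shifts the all-ones lanes right by 63 to leave the 0/1 zero-extended result.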
@@ -1585,253 +690,68 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
define <16 x i32> @ext_i16_16i32(i16 %a0) {
; SSE2-SSSE3-LABEL: ext_i16_16i32:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
+; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrld $31, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
-; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
-; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: psrld $31, %xmm1
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [256,512,1024,2048]
; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
-; SSE2-SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: psrld $31, %xmm2
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [4096,8192,16384,32768]
; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: psrld $31, %xmm3
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i16_16i32:
; AVX1: # BB#0:
-; AVX1-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $3, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $4, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $5, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $6, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $7, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $9, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $10, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $11, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $12, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $13, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $14, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $15, %eax
-; AVX1-NEXT: movzwl %ax, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $31, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $31, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i16_16i32:
; AVX2: # BB#0:
-; AVX2-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: movl %eax, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $3, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $4, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $5, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $6, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $7, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $9, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $10, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $11, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $12, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $13, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $14, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $15, %eax
-; AVX2-NEXT: movzwl %ax, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [256,512,1024,2048,4096,8192,16384,32768]
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $31, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i16_16i32:
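ext_i16_16i32 presumably exercises the same pattern at <16 x i1>; an IR sketch on that assumption:

define <16 x i32> @ext_i16_16i32(i16 %a0) {
  %1 = bitcast i16 %a0 to <16 x i1>
  %2 = zext <16 x i1> %1 to <16 x i32>
  ret <16 x i32> %2
}

Each 32-bit lane is masked with one of 1..32768, compared against the mask, and shifted right by 31, keeping the whole bit-test in vector registers instead of the old scalar shift/and/pinsr chain.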
@@ -1847,549 +767,75 @@ define <16 x i32> @ext_i16_16i32(i16 %a0
define <32 x i16> @ext_i32_32i16(i32 %a0) {
; SSE2-SSSE3-LABEL: ext_i32_32i16:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movl %edi, %eax
-; SSE2-SSSE3-NEXT: shrl $16, %eax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm2
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1]
; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [256,512,1024,2048,4096,8192,16384,32768]
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm5, %xmm1
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm1
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,1,1,1,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
-; SSE2-SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm5, %xmm3
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm3
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i32_32i16:
; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi3:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi4:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi5:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $128, %rsp
-; AVX1-NEXT: .Lcfi6:
-; AVX1-NEXT: .cfi_offset %rbx, -56
-; AVX1-NEXT: .Lcfi7:
-; AVX1-NEXT: .cfi_offset %r12, -48
-; AVX1-NEXT: .Lcfi8:
-; AVX1-NEXT: .cfi_offset %r13, -40
-; AVX1-NEXT: .Lcfi9:
-; AVX1-NEXT: .cfi_offset %r14, -32
-; AVX1-NEXT: .Lcfi10:
-; AVX1-NEXT: .cfi_offset %r15, -24
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX1-NEXT: movl %edi, %r13d
-; AVX1-NEXT: movl %edi, %r12d
-; AVX1-NEXT: movl %edi, %r15d
-; AVX1-NEXT: movl %edi, %r14d
-; AVX1-NEXT: movl %edi, %ebx
-; AVX1-NEXT: movl %edi, %r11d
-; AVX1-NEXT: movl %edi, %r10d
-; AVX1-NEXT: movl %edi, %r9d
-; AVX1-NEXT: movl %edi, %r8d
-; AVX1-NEXT: movl %edi, %esi
-; AVX1-NEXT: movl %edi, %edx
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vmovd %edi, %xmm0
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $3, %edx
-; AVX1-NEXT: andl $1, %edx
-; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $4, %esi
-; AVX1-NEXT: andl $1, %esi
-; AVX1-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
-; AVX1-NEXT: shrl $5, %r8d
-; AVX1-NEXT: andl $1, %r8d
-; AVX1-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $6, %r9d
-; AVX1-NEXT: andl $1, %r9d
-; AVX1-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $7, %r10d
-; AVX1-NEXT: andl $1, %r10d
-; AVX1-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $8, %r11d
-; AVX1-NEXT: andl $1, %r11d
-; AVX1-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $9, %ebx
-; AVX1-NEXT: andl $1, %ebx
-; AVX1-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $10, %r14d
-; AVX1-NEXT: andl $1, %r14d
-; AVX1-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $11, %r15d
-; AVX1-NEXT: andl $1, %r15d
-; AVX1-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $12, %r12d
-; AVX1-NEXT: andl $1, %r12d
-; AVX1-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
-; AVX1-NEXT: shrl $13, %r13d
-; AVX1-NEXT: andl $1, %r13d
-; AVX1-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $15, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $16, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vmovd %edi, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm0, %xmm4
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $15, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
-; AVX1-NEXT: leaq -40(%rbp), %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm1, %xmm2
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $15, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $15, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i32_32i16:
; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi3:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi4:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi5:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $128, %rsp
-; AVX2-NEXT: .Lcfi6:
-; AVX2-NEXT: .cfi_offset %rbx, -56
-; AVX2-NEXT: .Lcfi7:
-; AVX2-NEXT: .cfi_offset %r12, -48
-; AVX2-NEXT: .Lcfi8:
-; AVX2-NEXT: .cfi_offset %r13, -40
-; AVX2-NEXT: .Lcfi9:
-; AVX2-NEXT: .cfi_offset %r14, -32
-; AVX2-NEXT: .Lcfi10:
-; AVX2-NEXT: .cfi_offset %r15, -24
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, {{[0-9]+}}(%rsp) # 4-byte Spill
-; AVX2-NEXT: movl %edi, %r13d
-; AVX2-NEXT: movl %edi, %r12d
-; AVX2-NEXT: movl %edi, %r15d
-; AVX2-NEXT: movl %edi, %r14d
-; AVX2-NEXT: movl %edi, %ebx
-; AVX2-NEXT: movl %edi, %r11d
-; AVX2-NEXT: movl %edi, %r10d
-; AVX2-NEXT: movl %edi, %r9d
-; AVX2-NEXT: movl %edi, %r8d
-; AVX2-NEXT: movl %edi, %esi
-; AVX2-NEXT: movl %edi, %edx
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: andl $1, %edi
; AVX2-NEXT: vmovd %edi, %xmm0
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $3, %edx
-; AVX2-NEXT: andl $1, %edx
-; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $4, %esi
-; AVX2-NEXT: andl $1, %esi
-; AVX2-NEXT: vpinsrb $4, %esi, %xmm0, %xmm0
-; AVX2-NEXT: shrl $5, %r8d
-; AVX2-NEXT: andl $1, %r8d
-; AVX2-NEXT: vpinsrb $5, %r8d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $6, %r9d
-; AVX2-NEXT: andl $1, %r9d
-; AVX2-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $7, %r10d
-; AVX2-NEXT: andl $1, %r10d
-; AVX2-NEXT: vpinsrb $7, %r10d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $8, %r11d
-; AVX2-NEXT: andl $1, %r11d
-; AVX2-NEXT: vpinsrb $8, %r11d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $9, %ebx
-; AVX2-NEXT: andl $1, %ebx
-; AVX2-NEXT: vpinsrb $9, %ebx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $10, %r14d
-; AVX2-NEXT: andl $1, %r14d
-; AVX2-NEXT: vpinsrb $10, %r14d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $11, %r15d
-; AVX2-NEXT: andl $1, %r15d
-; AVX2-NEXT: vpinsrb $11, %r15d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $12, %r12d
-; AVX2-NEXT: andl $1, %r12d
-; AVX2-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0
-; AVX2-NEXT: shrl $13, %r13d
-; AVX2-NEXT: andl $1, %r13d
-; AVX2-NEXT: vpinsrb $13, %r13d, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $15, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $16, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax # 4-byte Reload
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: leaq -40(%rbp), %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0
+; AVX2-NEXT: shrl $16, %edi
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrlw $15, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i32_32i16:
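ext_i32_32i16 presumably follows the same shape for <32 x i1>:

define <32 x i16> @ext_i32_32i16(i32 %a0) {
  %1 = bitcast i32 %a0 to <32 x i1>
  %2 = zext <32 x i1> %1 to <32 x i16>
  ret <32 x i16> %2
}

A vector of i16 lanes only covers 16 mask bits per register, so the scalar is split: the SSE output splats word 0 and then word 1 of the input via pshuflw, while AVX2 broadcasts the low 16 bits and shifts the scalar right by 16 before broadcasting the upper half.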
@@ -2405,867 +851,102 @@ define <32 x i16> @ext_i32_32i16(i32 %a0
define <64 x i8> @ext_i64_64i8(i64 %a0) {
; SSE2-SSSE3-LABEL: ext_i64_64i8:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movq %rdi, %rax
-; SSE2-SSSE3-NEXT: shrq $32, %rax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movq %rdi, %rax
-; SSE2-SSSE3-NEXT: shrq $48, %rax
-; SSE2-SSSE3-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: shrl $16, %edi
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm6
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm6
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm6
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm6
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm5
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm7
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm3
+; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,1,1,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm0
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,2,3,3,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm1
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,4,5,5]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm2
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm2
+; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,7,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: psrlw $7, %xmm3
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm3
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: ext_i64_64i8:
; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi11:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi12:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi13:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $4, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $5, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $6, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $8, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $9, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $10, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $11, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $12, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $13, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $15, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $49, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movq %rdi, %rcx
-; AVX1-NEXT: shrq $48, %rcx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $50, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $51, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $52, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $53, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $54, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $55, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $56, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $57, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $58, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $59, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $60, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $61, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $62, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $63, %rax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $33, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movq %rdi, %rcx
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $34, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $35, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $36, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $37, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $38, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $39, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $40, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $41, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $42, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $43, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $44, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $45, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: shrq $46, %rax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX1-NEXT: shrq $47, %rdi
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vpinsrb $15, %edi, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpeqb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $7, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,5,5]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,7,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: ext_i64_64i8:
; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi11:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi12:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi13:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrl $16, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm1
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $2, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $4, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $5, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $6, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $8, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $9, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $10, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $11, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $12, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $13, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $15, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $49, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movq %rdi, %rcx
-; AVX2-NEXT: shrq $48, %rcx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm1
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $50, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $51, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $52, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $53, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $54, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $55, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $56, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $57, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $58, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $59, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $60, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $61, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $62, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $63, %rax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $33, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movq %rdi, %rcx
-; AVX2-NEXT: shrq $32, %rcx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm2
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $34, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $35, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $36, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $37, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $38, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $39, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $40, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $41, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $42, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $43, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $44, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $45, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: shrq $46, %rax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-NEXT: shrq $47, %rdi
-; AVX2-NEXT: andl $1, %edi
-; AVX2-NEXT: vpinsrb $15, %edi, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,4,5,5]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3]
+; AVX2-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,7,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm4, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $7, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: ext_i64_64i8:
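For reference, the ext_i64_64i8 checks above correspond to IR of roughly the
following shape. No define line for this test is visible in the diff, so this
is an inferred sketch based only on the function name and the visible check
pattern (mask with powers of two, compare, shift to 0/1 bytes):

; Sketch (inferred, not taken from the diff): a scalar i64 bitcast to a
; <64 x i1> bool vector, then zero-extended to the legal <64 x i8> type.
define <64 x i8> @ext_i64_64i8(i64 %a0) {
  %1 = bitcast i64 %a0 to <64 x i1>
  %2 = zext <64 x i1> %1 to <64 x i8>
  ret <64 x i8> %2
}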
Modified: llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool.ll?rev=314076&r1=314075&r2=314076&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool.ll Sun Sep 24 06:42:31 2017
@@ -8,29 +8,38 @@
define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: andl $1, %eax
-; SSE2-SSSE3-NEXT: movq %rax, %xmm1
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
+; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
; SSE2-SSSE3-NEXT: retq
;
-; AVX12-LABEL: bitcast_i2_2i1:
-; AVX12: # BB#0:
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vmovq %rcx, %xmm0
-; AVX12-NEXT: shrl %eax
-; AVX12-NEXT: andl $1, %eax
-; AVX12-NEXT: vmovq %rax, %xmm1
-; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX12-NEXT: retq
+; AVX1-LABEL: bitcast_i2_2i1:
+; AVX1: # BB#0:
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: vmovq %rdi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i2_2i1:
+; AVX2: # BB#0:
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: vmovq %rdi, %xmm0
+; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlq $63, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i2_2i1:
; AVX512: # BB#0:
@@ -48,54 +57,32 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroe
define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i4_4i1:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-SSSE3-NEXT: movd %eax, %xmm0
-; SSE2-SSSE3-NEXT: shrl %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm2
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrld $31, %xmm0
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: bitcast_i4_4i1:
; AVX1: # BB#0:
-; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i4_4i1:
; AVX2: # BB#0:
-; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl -{{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl %ecx
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movl %eax, %ecx
-; AVX2-NEXT: shrl $2, %ecx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i4_4i1:
@@ -115,82 +102,35 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroe
define <8 x i1> @bitcast_i8_8i1(i8 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i8_8i1:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: shrl $7, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm3
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-SSSE3-NEXT: movd %edi, %xmm0
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
; SSE2-SSSE3-NEXT: retq
;
-; AVX12-LABEL: bitcast_i8_8i1:
-; AVX12: # BB#0:
-; AVX12-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: movl %eax, %edx
-; AVX12-NEXT: andl $1, %edx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $2, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $3, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $4, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $5, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $6, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrl $7, %eax
-; AVX12-NEXT: movzwl %ax, %eax
-; AVX12-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: bitcast_i8_8i1:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i8_8i1:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i8_8i1:
; AVX512: # BB#0:
@@ -202,156 +142,51 @@ define <8 x i1> @bitcast_i8_8i1(i8 zeroe
}
define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
-; SSE2-SSSE3-LABEL: bitcast_i16_16i1:
-; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $7, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $6, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $5, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $4, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $3, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $2, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm0
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $11, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $10, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $9, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $8, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm1
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $13, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $12, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm3
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-SSSE3-NEXT: movl %eax, %ecx
-; SSE2-SSSE3-NEXT: shrl $14, %ecx
-; SSE2-SSSE3-NEXT: andl $1, %ecx
-; SSE2-SSSE3-NEXT: movd %ecx, %xmm2
-; SSE2-SSSE3-NEXT: shrl $15, %eax
-; SSE2-SSSE3-NEXT: movzwl %ax, %eax
-; SSE2-SSSE3-NEXT: movd %eax, %xmm4
-; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-SSSE3-NEXT: retq
+; SSE2-LABEL: bitcast_i16_16i1:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE2-NEXT: psrlw $7, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: bitcast_i16_16i1:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %edi, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pcmpeqb %xmm1, %xmm0
+; SSSE3-NEXT: psrlw $7, %xmm0
+; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: retq
;
-; AVX12-LABEL: bitcast_i16_16i1:
-; AVX12: # BB#0:
-; AVX12-NEXT: movw %di, -{{[0-9]+}}(%rsp)
-; AVX12-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: movl %eax, %edx
-; AVX12-NEXT: andl $1, %edx
-; AVX12-NEXT: vmovd %edx, %xmm0
-; AVX12-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $2, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $3, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $4, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $5, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $6, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $7, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $8, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $9, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $10, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $11, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $12, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $13, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: movl %eax, %ecx
-; AVX12-NEXT: shrl $14, %ecx
-; AVX12-NEXT: andl $1, %ecx
-; AVX12-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX12-NEXT: shrl $15, %eax
-; AVX12-NEXT: movzwl %ax, %eax
-; AVX12-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX12-NEXT: retq
+; AVX1-LABEL: bitcast_i16_16i1:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_i16_16i1:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i16_16i1:
; AVX512: # BB#0:
@@ -371,286 +206,43 @@ define <32 x i1> @bitcast_i32_32i1(i32 %
;
; AVX1-LABEL: bitcast_i32_32i1:
; AVX1: # BB#0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: .Lcfi0:
-; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: .Lcfi1:
-; AVX1-NEXT: .cfi_offset %rbp, -16
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: .Lcfi2:
-; AVX1-NEXT: .cfi_def_cfa_register %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $17, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $18, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $19, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $20, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $21, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $22, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $23, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $24, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $25, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $26, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $27, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $28, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $29, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $30, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $31, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: movl %edi, %ecx
-; AVX1-NEXT: andl $1, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm1
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $3, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $4, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $5, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $6, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $7, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $8, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $9, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $10, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $11, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $12, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $13, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movl %edi, %eax
-; AVX1-NEXT: shrl $14, %eax
-; AVX1-NEXT: andl $1, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: shrl $15, %edi
-; AVX1-NEXT: andl $1, %edi
-; AVX1-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitcast_i32_32i1:
; AVX2: # BB#0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: .Lcfi0:
-; AVX2-NEXT: .cfi_def_cfa_offset 16
-; AVX2-NEXT: .Lcfi1:
-; AVX2-NEXT: .cfi_offset %rbp, -16
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: .Lcfi2:
-; AVX2-NEXT: .cfi_def_cfa_register %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $17, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrl $16, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm0
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $18, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $19, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $20, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $21, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $22, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $23, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $24, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $25, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $26, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $27, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $28, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $29, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $30, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $31, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm1
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $2, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $4, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $5, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $6, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $7, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $8, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $9, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $10, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $11, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $12, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $13, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrl $14, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: shrl $15, %edi
-; AVX2-NEXT: andl $1, %edi
-; AVX2-NEXT: vpinsrb $15, %edi, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: bitcast_i32_32i1:
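The bitcast_* checks above come from tests whose signatures are visible in the
hunk headers, for example define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0). The
test bodies are elided from the diff; a minimal sketch, assuming each test is
a plain scalar-to-bool-vector bitcast, would be:

; Sketch (body inferred; only the signature appears in the diff):
define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
  %1 = bitcast i4 %a0 to <4 x i1>
  ret <4 x i1> %1
}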