[llvm] r265045 - [X86][SSE] Some basic tests for variable shuffles

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Thu Mar 31 13:26:31 PDT 2016


Author: rksimon
Date: Thu Mar 31 15:26:30 2016
New Revision: 265045

URL: http://llvm.org/viewvc/llvm-project?rev=265045&view=rev
Log:
[X86][SSE] Some basic tests for variable shuffles

We don't really support non-constant shuffle masks, but these tests cover cases where the BUILD_VECTOR is made up of vector extracts (as well as undef/zero scalars).
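
For reference, the IR shape these tests exercise looks like the sketch below (a hypothetical minimal example in the same style as the new tests, not an excerpt from them): lanes are extracted at run-time indices and reinserted at constant positions, so the shuffle mask itself is never a constant. Because the extract indices are variable, the backend currently spills the source vector to the stack and reloads each scalar through an indexed load, which is what the checked assembly below demonstrates.

    ; Minimal sketch of the tested pattern; %i0/%i1 are runtime lane indices.
    define <2 x float> @sketch_var_shuffle(<4 x float> %x, i32 %i0, i32 %i1) {
      %e0 = extractelement <4 x float> %x, i32 %i0            ; variable source lane
      %e1 = extractelement <4 x float> %x, i32 %i1
      %v0 = insertelement <2 x float> undef, float %e0, i32 0 ; constant dest lane
      %v1 = insertelement <2 x float> %v0, float %e1, i32 1
      ret <2 x float> %v1
    }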

Added:
    llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll

Added: llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll?rev=265045&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll Thu Mar 31 15:26:30 2016
@@ -0,0 +1,1222 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+;
+; Unary shuffle indices from registers
+;
+
+define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i64 %i1) nounwind {
+; SSE-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
+; SSE:       # BB#0:
+; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i64 %i0
+  %x1 = extractelement <2 x double> %x, i64 %i1
+  %r0 = insertelement <2 x double> undef, double %x0, i32 0
+  %r1 = insertelement <2 x double>   %r0, double %x1, i32 1
+  ret <2 x double> %r1
+}
+
+define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
+; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
+; SSE:       # BB#0:
+; SSE-NEXT:    movslq %edi, %rax
+; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT:    movslq %esi, %rcx
+; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
+; AVX:       # BB#0:
+; AVX-NEXT:    movslq %edi, %rax
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movslq %esi, %rcx
+; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    retq
+  %x0 = extractelement <2 x i64> %x, i32 %i0
+  %x1 = extractelement <2 x i64> %x, i32 %i1
+  %r0 = insertelement <2 x i64> undef, i64 %x0, i32 0
+  %r1 = insertelement <2 x i64>   %r0, i64 %x1, i32 1
+  ret <2 x i64> %r1
+}
+
+define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
+; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movslq %edi, %rax
+; SSE2-NEXT:    movslq %esi, %rsi
+; SSE2-NEXT:    movslq %edx, %rdx
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movslq %ecx, %rcx
+; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movslq %edi, %rax
+; SSSE3-NEXT:    movslq %esi, %rsi
+; SSSE3-NEXT:    movslq %edx, %rdx
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movslq %ecx, %rcx
+; SSSE3-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    movslq %edi, %rax
+; SSE41-NEXT:    movslq %esi, %rsi
+; SSE41-NEXT:    movslq %edx, %rdx
+; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movslq %ecx, %rcx
+; SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    movslq %edi, %rax
+; AVX-NEXT:    movslq %esi, %rsi
+; AVX-NEXT:    movslq %edx, %rdx
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movslq %ecx, %rcx
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 %i0
+  %x1 = extractelement <4 x float> %x, i32 %i1
+  %x2 = extractelement <4 x float> %x, i32 %i2
+  %x3 = extractelement <4 x float> %x, i32 %i3
+  %r0 = insertelement <4 x float> undef, float %x0, i32 0
+  %r1 = insertelement <4 x float>   %r0, float %x1, i32 1
+  %r2 = insertelement <4 x float>   %r1, float %x2, i32 2
+  %r3 = insertelement <4 x float>   %r2, float %x3, i32 3
+  ret <4 x float> %r3
+}
+
+define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
+; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movslq %edi, %rax
+; SSE2-NEXT:    movslq %esi, %rsi
+; SSE2-NEXT:    movslq %edx, %rdx
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movslq %ecx, %rcx
+; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movslq %edi, %rax
+; SSSE3-NEXT:    movslq %esi, %rsi
+; SSSE3-NEXT:    movslq %edx, %rdx
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movslq %ecx, %rcx
+; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    movslq %edi, %rax
+; SSE41-NEXT:    movslq %esi, %rsi
+; SSE41-NEXT:    movslq %edx, %rdx
+; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movslq %ecx, %rcx
+; SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT:    pinsrd $1, -24(%rsp,%rsi,4), %xmm0
+; SSE41-NEXT:    pinsrd $2, -24(%rsp,%rdx,4), %xmm0
+; SSE41-NEXT:    pinsrd $3, -24(%rsp,%rcx,4), %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    movslq %edi, %rax
+; AVX-NEXT:    movslq %esi, %rsi
+; AVX-NEXT:    movslq %edx, %rdx
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movslq %ecx, %rcx
+; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vpinsrd $1, -24(%rsp,%rsi,4), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $3, -24(%rsp,%rcx,4), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x i32> %x, i32 %i0
+  %x1 = extractelement <4 x i32> %x, i32 %i1
+  %x2 = extractelement <4 x i32> %x, i32 %i2
+  %x3 = extractelement <4 x i32> %x, i32 %i3
+  %r0 = insertelement <4 x i32> undef, i32 %x0, i32 0
+  %r1 = insertelement <4 x i32>   %r0, i32 %x1, i32 1
+  %r2 = insertelement <4 x i32>   %r1, i32 %x2, i32 2
+  %r3 = insertelement <4 x i32>   %r2, i32 %x3, i32 3
+  ret <4 x i32> %r3
+}
+
+define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
+; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movswq %di, %rax
+; SSE2-NEXT:    movswq %si, %rsi
+; SSE2-NEXT:    movswq %dx, %rdx
+; SSE2-NEXT:    movswq %cx, %r10
+; SSE2-NEXT:    movswq %r8w, %r11
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movswq %r9w, %r8
+; SSE2-NEXT:    movswq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT:    movswq {{[0-9]+}}(%rsp), %rdi
+; SSE2-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
+; SSE2-NEXT:    movzwl -24(%rsp,%rdi,2), %edi
+; SSE2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; SSE2-NEXT:    movzwl -24(%rsp,%rsi,2), %esi
+; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    movzwl -24(%rsp,%rdx,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT:    movzwl -24(%rsp,%r10,2), %ecx
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    movzwl -24(%rsp,%r11,2), %eax
+; SSE2-NEXT:    movd %eax, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    movd %edi, %xmm1
+; SSE2-NEXT:    movd %ecx, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT:    movd %esi, %xmm1
+; SSE2-NEXT:    movzwl -24(%rsp,%r8,2), %eax
+; SSE2-NEXT:    movd %eax, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movswq %di, %rax
+; SSSE3-NEXT:    movswq %si, %rsi
+; SSSE3-NEXT:    movswq %dx, %rdx
+; SSSE3-NEXT:    movswq %cx, %r10
+; SSSE3-NEXT:    movswq %r8w, %r11
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movswq %r9w, %r8
+; SSSE3-NEXT:    movswq {{[0-9]+}}(%rsp), %rcx
+; SSSE3-NEXT:    movswq {{[0-9]+}}(%rsp), %rdi
+; SSSE3-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
+; SSSE3-NEXT:    movzwl -24(%rsp,%rdi,2), %edi
+; SSSE3-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; SSSE3-NEXT:    movzwl -24(%rsp,%rsi,2), %esi
+; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    movzwl -24(%rsp,%rdx,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm1
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT:    movzwl -24(%rsp,%r10,2), %ecx
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    movzwl -24(%rsp,%r11,2), %eax
+; SSSE3-NEXT:    movd %eax, %xmm2
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT:    movd %edi, %xmm1
+; SSSE3-NEXT:    movd %ecx, %xmm2
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT:    movd %esi, %xmm1
+; SSSE3-NEXT:    movzwl -24(%rsp,%r8,2), %eax
+; SSSE3-NEXT:    movd %eax, %xmm3
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    pushq %rbx
+; SSE41-NEXT:    movswq %di, %rax
+; SSE41-NEXT:    movswq %si, %rbx
+; SSE41-NEXT:    movswq %dx, %r11
+; SSE41-NEXT:    movswq %cx, %r10
+; SSE41-NEXT:    movswq %r8w, %rdi
+; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movswq %r9w, %rcx
+; SSE41-NEXT:    movswq {{[0-9]+}}(%rsp), %rdx
+; SSE41-NEXT:    movswq {{[0-9]+}}(%rsp), %rsi
+; SSE41-NEXT:    movzwl -16(%rsp,%rdx,2), %edx
+; SSE41-NEXT:    movzwl -16(%rsp,%rsi,2), %esi
+; SSE41-NEXT:    movzwl -16(%rsp,%rax,2), %eax
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    pinsrw $1, -16(%rsp,%rbx,2), %xmm0
+; SSE41-NEXT:    pinsrw $2, -16(%rsp,%r11,2), %xmm0
+; SSE41-NEXT:    pinsrw $3, -16(%rsp,%r10,2), %xmm0
+; SSE41-NEXT:    pinsrw $4, -16(%rsp,%rdi,2), %xmm0
+; SSE41-NEXT:    pinsrw $5, -16(%rsp,%rcx,2), %xmm0
+; SSE41-NEXT:    pinsrw $6, %edx, %xmm0
+; SSE41-NEXT:    pinsrw $7, %esi, %xmm0
+; SSE41-NEXT:    popq %rbx
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
+; AVX:       # BB#0:
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movswq %di, %r10
+; AVX-NEXT:    movswq %si, %r11
+; AVX-NEXT:    movswq %dx, %r14
+; AVX-NEXT:    movswq %cx, %rcx
+; AVX-NEXT:    movswq %r8w, %rdi
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movswq %r9w, %rax
+; AVX-NEXT:    movswq {{[0-9]+}}(%rsp), %rsi
+; AVX-NEXT:    movswq {{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    movzwl -24(%rsp,%rsi,2), %esi
+; AVX-NEXT:    movzwl -24(%rsp,%rdx,2), %edx
+; AVX-NEXT:    movzwl -24(%rsp,%r10,2), %ebx
+; AVX-NEXT:    vmovd %ebx, %xmm0
+; AVX-NEXT:    vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $2, -24(%rsp,%r14,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $4, -24(%rsp,%rdi,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $7, %edx, %xmm0, %xmm0
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    retq
+  %x0 = extractelement <8 x i16> %x, i16 %i0
+  %x1 = extractelement <8 x i16> %x, i16 %i1
+  %x2 = extractelement <8 x i16> %x, i16 %i2
+  %x3 = extractelement <8 x i16> %x, i16 %i3
+  %x4 = extractelement <8 x i16> %x, i16 %i4
+  %x5 = extractelement <8 x i16> %x, i16 %i5
+  %x6 = extractelement <8 x i16> %x, i16 %i6
+  %x7 = extractelement <8 x i16> %x, i16 %i7
+  %r0 = insertelement <8 x i16> undef, i16 %x0, i32 0
+  %r1 = insertelement <8 x i16>   %r0, i16 %x1, i32 1
+  %r2 = insertelement <8 x i16>   %r1, i16 %x2, i32 2
+  %r3 = insertelement <8 x i16>   %r2, i16 %x3, i32 3
+  %r4 = insertelement <8 x i16>   %r3, i16 %x4, i32 4
+  %r5 = insertelement <8 x i16>   %r4, i16 %x5, i32 5
+  %r6 = insertelement <8 x i16>   %r5, i16 %x6, i32 6
+  %r7 = insertelement <8 x i16>   %r6, i16 %x7, i32 7
+  ret <8 x i16> %r7
+}
+
+define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
+; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %r10
+; SSE2-NEXT:    leaq -{{[0-9]+}}(%rsp), %r11
+; SSE2-NEXT:    movzbl (%r10,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm15
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm8
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm9
+; SSE2-NEXT:    movsbq %dl, %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm3
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm10
+; SSE2-NEXT:    movsbq %dil, %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm11
+; SSE2-NEXT:    movsbq %r8b, %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm7
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm2
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm12
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm13
+; SSE2-NEXT:    movsbq %cl, %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm6
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm14
+; SSE2-NEXT:    movsbq %sil, %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm5
+; SSE2-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm4
+; SSE2-NEXT:    movsbq %r9b, %rax
+; SSE2-NEXT:    movzbl (%rax,%r11), %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %r10
+; SSSE3-NEXT:    leaq -{{[0-9]+}}(%rsp), %r11
+; SSSE3-NEXT:    movzbl (%r10,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm15
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm8
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm9
+; SSSE3-NEXT:    movsbq %dl, %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm3
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm10
+; SSSE3-NEXT:    movsbq %dil, %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm11
+; SSSE3-NEXT:    movsbq %r8b, %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm7
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm2
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm12
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm13
+; SSSE3-NEXT:    movsbq %cl, %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm6
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm14
+; SSSE3-NEXT:    movsbq %sil, %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm5
+; SSSE3-NEXT:    movsbq {{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm4
+; SSSE3-NEXT:    movsbq %r9b, %rax
+; SSSE3-NEXT:    movzbl (%rax,%r11), %eax
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    pushq %rbp
+; SSE41-NEXT:    pushq %r15
+; SSE41-NEXT:    pushq %r14
+; SSE41-NEXT:    pushq %r13
+; SSE41-NEXT:    pushq %r12
+; SSE41-NEXT:    pushq %rbx
+; SSE41-NEXT:    movsbq %dil, %r15
+; SSE41-NEXT:    movsbq %sil, %r14
+; SSE41-NEXT:    movsbq %dl, %r11
+; SSE41-NEXT:    movsbq %cl, %r10
+; SSE41-NEXT:    movsbq %r8b, %r8
+; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movsbq %r9b, %r9
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %r12
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %r13
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %rbp
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %rbx
+; SSE41-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
+; SSE41-NEXT:    movzbl (%r15,%rax), %ecx
+; SSE41-NEXT:    movd %ecx, %xmm0
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %r15
+; SSE41-NEXT:    pinsrb $1, (%r14,%rax), %xmm0
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %r14
+; SSE41-NEXT:    pinsrb $2, (%r11,%rax), %xmm0
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %r11
+; SSE41-NEXT:    pinsrb $3, (%r10,%rax), %xmm0
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %r10
+; SSE41-NEXT:    pinsrb $4, (%r8,%rax), %xmm0
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT:    pinsrb $5, (%r9,%rax), %xmm0
+; SSE41-NEXT:    movsbq {{[0-9]+}}(%rsp), %rdx
+; SSE41-NEXT:    movzbl (%r12,%rax), %esi
+; SSE41-NEXT:    movzbl (%r13,%rax), %edi
+; SSE41-NEXT:    movzbl (%rbp,%rax), %ebp
+; SSE41-NEXT:    movzbl (%rbx,%rax), %ebx
+; SSE41-NEXT:    movzbl (%r15,%rax), %r8d
+; SSE41-NEXT:    movzbl (%r14,%rax), %r9d
+; SSE41-NEXT:    movzbl (%r11,%rax), %r11d
+; SSE41-NEXT:    movzbl (%r10,%rax), %r10d
+; SSE41-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE41-NEXT:    movzbl (%rdx,%rax), %eax
+; SSE41-NEXT:    pinsrb $6, %esi, %xmm0
+; SSE41-NEXT:    pinsrb $7, %edi, %xmm0
+; SSE41-NEXT:    pinsrb $8, %ebp, %xmm0
+; SSE41-NEXT:    pinsrb $9, %ebx, %xmm0
+; SSE41-NEXT:    pinsrb $10, %r8d, %xmm0
+; SSE41-NEXT:    pinsrb $11, %r9d, %xmm0
+; SSE41-NEXT:    pinsrb $12, %r11d, %xmm0
+; SSE41-NEXT:    pinsrb $13, %r10d, %xmm0
+; SSE41-NEXT:    pinsrb $14, %ecx, %xmm0
+; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
+; SSE41-NEXT:    popq %rbx
+; SSE41-NEXT:    popq %r12
+; SSE41-NEXT:    popq %r13
+; SSE41-NEXT:    popq %r14
+; SSE41-NEXT:    popq %r15
+; SSE41-NEXT:    popq %rbp
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
+; AVX:       # BB#0:
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movsbq %dil, %r10
+; AVX-NEXT:    movsbq %sil, %r11
+; AVX-NEXT:    movsbq %dl, %r14
+; AVX-NEXT:    movsbq %cl, %r15
+; AVX-NEXT:    movsbq %r8b, %r8
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movsbq %r9b, %r9
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %r12
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %r13
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %rbp
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    leaq -{{[0-9]+}}(%rsp), %rdi
+; AVX-NEXT:    movzbl (%r10,%rdi), %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %r10
+; AVX-NEXT:    vpinsrb $1, (%r11,%rdi), %xmm0, %xmm0
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %r11
+; AVX-NEXT:    vpinsrb $2, (%r14,%rdi), %xmm0, %xmm0
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %r14
+; AVX-NEXT:    vpinsrb $3, (%r15,%rdi), %xmm0, %xmm0
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %r15
+; AVX-NEXT:    vpinsrb $4, (%r8,%rdi), %xmm0, %xmm0
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %r8
+; AVX-NEXT:    vpinsrb $5, (%r9,%rdi), %xmm0, %xmm0
+; AVX-NEXT:    movsbq {{[0-9]+}}(%rsp), %rsi
+; AVX-NEXT:    movzbl (%r12,%rdi), %edx
+; AVX-NEXT:    movzbl (%r13,%rdi), %ebx
+; AVX-NEXT:    movzbl (%rbp,%rdi), %ebp
+; AVX-NEXT:    movzbl (%rcx,%rdi), %ecx
+; AVX-NEXT:    movzbl (%r10,%rdi), %eax
+; AVX-NEXT:    movzbl (%r11,%rdi), %r9d
+; AVX-NEXT:    movzbl (%r14,%rdi), %r10d
+; AVX-NEXT:    movzbl (%r15,%rdi), %r11d
+; AVX-NEXT:    movzbl (%r8,%rdi), %r8d
+; AVX-NEXT:    movzbl (%rsi,%rdi), %esi
+; AVX-NEXT:    vpinsrb $6, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $7, %ebx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $8, %ebp, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $11, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $12, %r10d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $13, %r11d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $14, %r8d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $15, %esi, %xmm0, %xmm0
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    popq %rbp
+; AVX-NEXT:    retq
+  %x0  = extractelement <16 x i8> %x, i8 %i0
+  %x1  = extractelement <16 x i8> %x, i8 %i1
+  %x2  = extractelement <16 x i8> %x, i8 %i2
+  %x3  = extractelement <16 x i8> %x, i8 %i3
+  %x4  = extractelement <16 x i8> %x, i8 %i4
+  %x5  = extractelement <16 x i8> %x, i8 %i5
+  %x6  = extractelement <16 x i8> %x, i8 %i6
+  %x7  = extractelement <16 x i8> %x, i8 %i7
+  %x8  = extractelement <16 x i8> %x, i8 %i8
+  %x9  = extractelement <16 x i8> %x, i8 %i9
+  %x10 = extractelement <16 x i8> %x, i8 %i10
+  %x11 = extractelement <16 x i8> %x, i8 %i11
+  %x12 = extractelement <16 x i8> %x, i8 %i12
+  %x13 = extractelement <16 x i8> %x, i8 %i13
+  %x14 = extractelement <16 x i8> %x, i8 %i14
+  %x15 = extractelement <16 x i8> %x, i8 %i15
+  %r0  = insertelement <16 x i8> undef, i8 %x0 , i32 0
+  %r1  = insertelement <16 x i8>  %r0 , i8 %x1 , i32 1
+  %r2  = insertelement <16 x i8>  %r1 , i8 %x2 , i32 2
+  %r3  = insertelement <16 x i8>  %r2 , i8 %x3 , i32 3
+  %r4  = insertelement <16 x i8>  %r3 , i8 %x4 , i32 4
+  %r5  = insertelement <16 x i8>  %r4 , i8 %x5 , i32 5
+  %r6  = insertelement <16 x i8>  %r5 , i8 %x6 , i32 6
+  %r7  = insertelement <16 x i8>  %r6 , i8 %x7 , i32 7
+  %r8  = insertelement <16 x i8>  %r7 , i8 %x8 , i32 8
+  %r9  = insertelement <16 x i8>  %r8 , i8 %x9 , i32 9
+  %r10 = insertelement <16 x i8>  %r9 , i8 %x10, i32 10
+  %r11 = insertelement <16 x i8>  %r10, i8 %x11, i32 11
+  %r12 = insertelement <16 x i8>  %r11, i8 %x12, i32 12
+  %r13 = insertelement <16 x i8>  %r12, i8 %x13, i32 13
+  %r14 = insertelement <16 x i8>  %r13, i8 %x14, i32 14
+  %r15 = insertelement <16 x i8>  %r14, i8 %x15, i32 15
+  ret <16 x i8> %r15
+}
+
+;
+; Unary shuffle indices from memory
+;
+
+define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwind {
+; SSE2-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movslq (%rdi), %rax
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movslq 4(%rdi), %rcx
+; SSE2-NEXT:    movslq 8(%rdi), %rdx
+; SSE2-NEXT:    movslq 12(%rdi), %rsi
+; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movslq (%rdi), %rax
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movslq 4(%rdi), %rcx
+; SSSE3-NEXT:    movslq 8(%rdi), %rdx
+; SSSE3-NEXT:    movslq 12(%rdi), %rsi
+; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    movslq (%rdi), %rax
+; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movslq 4(%rdi), %rcx
+; SSE41-NEXT:    movslq 8(%rdi), %rdx
+; SSE41-NEXT:    movslq 12(%rdi), %rsi
+; SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT:    pinsrd $1, -24(%rsp,%rcx,4), %xmm0
+; SSE41-NEXT:    pinsrd $2, -24(%rsp,%rdx,4), %xmm0
+; SSE41-NEXT:    pinsrd $3, -24(%rsp,%rsi,4), %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    movslq (%rdi), %rax
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movslq 4(%rdi), %rcx
+; AVX-NEXT:    movslq 8(%rdi), %rdx
+; AVX-NEXT:    movslq 12(%rdi), %rsi
+; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vpinsrd $1, -24(%rsp,%rcx,4), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $3, -24(%rsp,%rsi,4), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %p0  = getelementptr inbounds i32, i32* %i, i64 0
+  %p1  = getelementptr inbounds i32, i32* %i, i64 1
+  %p2  = getelementptr inbounds i32, i32* %i, i64 2
+  %p3  = getelementptr inbounds i32, i32* %i, i64 3
+  %i0  = load i32, i32* %p0, align 4
+  %i1  = load i32, i32* %p1, align 4
+  %i2  = load i32, i32* %p2, align 4
+  %i3  = load i32, i32* %p3, align 4
+  %x0 = extractelement <4 x i32> %x, i32 %i0
+  %x1 = extractelement <4 x i32> %x, i32 %i1
+  %x2 = extractelement <4 x i32> %x, i32 %i2
+  %x3 = extractelement <4 x i32> %x, i32 %i3
+  %r0 = insertelement <4 x i32> undef, i32 %x0, i32 0
+  %r1 = insertelement <4 x i32>   %r0, i32 %x1, i32 1
+  %r2 = insertelement <4 x i32>   %r1, i32 %x2, i32 2
+  %r3 = insertelement <4 x i32>   %r2, i32 %x3, i32 3
+  ret <4 x i32> %r3
+}
+
+define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind {
+; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movsbq (%rdi), %rcx
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    movsbq 8(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm8
+; SSE2-NEXT:    movsbq 12(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm9
+; SSE2-NEXT:    movsbq 4(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm3
+; SSE2-NEXT:    movsbq 14(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm10
+; SSE2-NEXT:    movsbq 6(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm5
+; SSE2-NEXT:    movsbq 10(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm11
+; SSE2-NEXT:    movsbq 2(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm7
+; SSE2-NEXT:    movsbq 15(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm12
+; SSE2-NEXT:    movsbq 7(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm2
+; SSE2-NEXT:    movsbq 11(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm13
+; SSE2-NEXT:    movsbq 3(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm6
+; SSE2-NEXT:    movsbq 13(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm14
+; SSE2-NEXT:    movsbq 5(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm4
+; SSE2-NEXT:    movsbq 9(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm15
+; SSE2-NEXT:    movsbq 1(%rdi), %rcx
+; SSE2-NEXT:    movzbl (%rcx,%rax), %eax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movsbq (%rdi), %rcx
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    movsbq 8(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm8
+; SSSE3-NEXT:    movsbq 12(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm9
+; SSSE3-NEXT:    movsbq 4(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm3
+; SSSE3-NEXT:    movsbq 14(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm10
+; SSSE3-NEXT:    movsbq 6(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm5
+; SSSE3-NEXT:    movsbq 10(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm11
+; SSSE3-NEXT:    movsbq 2(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm7
+; SSSE3-NEXT:    movsbq 15(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm12
+; SSSE3-NEXT:    movsbq 7(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm2
+; SSSE3-NEXT:    movsbq 11(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm13
+; SSSE3-NEXT:    movsbq 3(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm6
+; SSSE3-NEXT:    movsbq 13(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm14
+; SSSE3-NEXT:    movsbq 5(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm4
+; SSSE3-NEXT:    movsbq 9(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm15
+; SSSE3-NEXT:    movsbq 1(%rdi), %rcx
+; SSSE3-NEXT:    movzbl (%rcx,%rax), %eax
+; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    pushq %rbp
+; SSE41-NEXT:    pushq %r15
+; SSE41-NEXT:    pushq %r14
+; SSE41-NEXT:    pushq %r13
+; SSE41-NEXT:    pushq %r12
+; SSE41-NEXT:    pushq %rbx
+; SSE41-NEXT:    movsbq (%rdi), %rax
+; SSE41-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movsbq 1(%rdi), %r15
+; SSE41-NEXT:    movsbq 2(%rdi), %r8
+; SSE41-NEXT:    movsbq 3(%rdi), %r9
+; SSE41-NEXT:    movsbq 4(%rdi), %r10
+; SSE41-NEXT:    movsbq 5(%rdi), %r11
+; SSE41-NEXT:    movsbq 6(%rdi), %r14
+; SSE41-NEXT:    movsbq 7(%rdi), %r12
+; SSE41-NEXT:    movsbq 8(%rdi), %r13
+; SSE41-NEXT:    movsbq 9(%rdi), %rdx
+; SSE41-NEXT:    movsbq 10(%rdi), %rcx
+; SSE41-NEXT:    movsbq 11(%rdi), %rsi
+; SSE41-NEXT:    movsbq 12(%rdi), %rbx
+; SSE41-NEXT:    leaq -{{[0-9]+}}(%rsp), %rbp
+; SSE41-NEXT:    movzbl (%rax,%rbp), %eax
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    movsbq 13(%rdi), %rax
+; SSE41-NEXT:    pinsrb $1, (%r15,%rbp), %xmm0
+; SSE41-NEXT:    movsbq 14(%rdi), %r15
+; SSE41-NEXT:    movsbq 15(%rdi), %rdi
+; SSE41-NEXT:    movzbl (%rdi,%rbp), %edi
+; SSE41-NEXT:    movzbl (%r15,%rbp), %r15d
+; SSE41-NEXT:    movzbl (%rax,%rbp), %eax
+; SSE41-NEXT:    movzbl (%rbx,%rbp), %ebx
+; SSE41-NEXT:    movzbl (%rsi,%rbp), %esi
+; SSE41-NEXT:    movzbl (%rcx,%rbp), %ecx
+; SSE41-NEXT:    movzbl (%rdx,%rbp), %edx
+; SSE41-NEXT:    movzbl (%r13,%rbp), %r13d
+; SSE41-NEXT:    movzbl (%r12,%rbp), %r12d
+; SSE41-NEXT:    movzbl (%r14,%rbp), %r14d
+; SSE41-NEXT:    movzbl (%r11,%rbp), %r11d
+; SSE41-NEXT:    movzbl (%r10,%rbp), %r10d
+; SSE41-NEXT:    movzbl (%r9,%rbp), %r9d
+; SSE41-NEXT:    movzbl (%r8,%rbp), %ebp
+; SSE41-NEXT:    pinsrb $2, %ebp, %xmm0
+; SSE41-NEXT:    pinsrb $3, %r9d, %xmm0
+; SSE41-NEXT:    pinsrb $4, %r10d, %xmm0
+; SSE41-NEXT:    pinsrb $5, %r11d, %xmm0
+; SSE41-NEXT:    pinsrb $6, %r14d, %xmm0
+; SSE41-NEXT:    pinsrb $7, %r12d, %xmm0
+; SSE41-NEXT:    pinsrb $8, %r13d, %xmm0
+; SSE41-NEXT:    pinsrb $9, %edx, %xmm0
+; SSE41-NEXT:    pinsrb $10, %ecx, %xmm0
+; SSE41-NEXT:    pinsrb $11, %esi, %xmm0
+; SSE41-NEXT:    pinsrb $12, %ebx, %xmm0
+; SSE41-NEXT:    pinsrb $13, %eax, %xmm0
+; SSE41-NEXT:    pinsrb $14, %r15d, %xmm0
+; SSE41-NEXT:    pinsrb $15, %edi, %xmm0
+; SSE41-NEXT:    popq %rbx
+; SSE41-NEXT:    popq %r12
+; SSE41-NEXT:    popq %r13
+; SSE41-NEXT:    popq %r14
+; SSE41-NEXT:    popq %r15
+; SSE41-NEXT:    popq %rbp
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
+; AVX:       # BB#0:
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movsbq (%rdi), %rsi
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movsbq 1(%rdi), %r15
+; AVX-NEXT:    movsbq 2(%rdi), %r8
+; AVX-NEXT:    movsbq 3(%rdi), %r9
+; AVX-NEXT:    movsbq 4(%rdi), %r10
+; AVX-NEXT:    movsbq 5(%rdi), %r11
+; AVX-NEXT:    movsbq 6(%rdi), %r14
+; AVX-NEXT:    movsbq 7(%rdi), %r12
+; AVX-NEXT:    movsbq 8(%rdi), %r13
+; AVX-NEXT:    movsbq 9(%rdi), %rdx
+; AVX-NEXT:    movsbq 10(%rdi), %rax
+; AVX-NEXT:    movsbq 11(%rdi), %rcx
+; AVX-NEXT:    movsbq 12(%rdi), %rbx
+; AVX-NEXT:    leaq -{{[0-9]+}}(%rsp), %rbp
+; AVX-NEXT:    movzbl (%rsi,%rbp), %esi
+; AVX-NEXT:    vmovd %esi, %xmm0
+; AVX-NEXT:    movsbq 13(%rdi), %rsi
+; AVX-NEXT:    vpinsrb $1, (%r15,%rbp), %xmm0, %xmm0
+; AVX-NEXT:    movsbq 14(%rdi), %r15
+; AVX-NEXT:    movsbq 15(%rdi), %rdi
+; AVX-NEXT:    movzbl (%rdi,%rbp), %edi
+; AVX-NEXT:    movzbl (%r15,%rbp), %r15d
+; AVX-NEXT:    movzbl (%rsi,%rbp), %esi
+; AVX-NEXT:    movzbl (%rbx,%rbp), %ebx
+; AVX-NEXT:    movzbl (%rcx,%rbp), %ecx
+; AVX-NEXT:    movzbl (%rax,%rbp), %eax
+; AVX-NEXT:    movzbl (%rdx,%rbp), %edx
+; AVX-NEXT:    movzbl (%r13,%rbp), %r13d
+; AVX-NEXT:    movzbl (%r12,%rbp), %r12d
+; AVX-NEXT:    movzbl (%r14,%rbp), %r14d
+; AVX-NEXT:    movzbl (%r11,%rbp), %r11d
+; AVX-NEXT:    movzbl (%r10,%rbp), %r10d
+; AVX-NEXT:    movzbl (%r9,%rbp), %r9d
+; AVX-NEXT:    movzbl (%r8,%rbp), %ebp
+; AVX-NEXT:    vpinsrb $2, %ebp, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $3, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $4, %r10d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $5, %r11d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $6, %r14d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $7, %r12d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $8, %r13d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $9, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $12, %ebx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $13, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $14, %r15d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    popq %rbp
+; AVX-NEXT:    retq
+  %p0  = getelementptr inbounds i8, i8* %i, i64 0
+  %p1  = getelementptr inbounds i8, i8* %i, i64 1
+  %p2  = getelementptr inbounds i8, i8* %i, i64 2
+  %p3  = getelementptr inbounds i8, i8* %i, i64 3
+  %p4  = getelementptr inbounds i8, i8* %i, i64 4
+  %p5  = getelementptr inbounds i8, i8* %i, i64 5
+  %p6  = getelementptr inbounds i8, i8* %i, i64 6
+  %p7  = getelementptr inbounds i8, i8* %i, i64 7
+  %p8  = getelementptr inbounds i8, i8* %i, i64 8
+  %p9  = getelementptr inbounds i8, i8* %i, i64 9
+  %p10 = getelementptr inbounds i8, i8* %i, i64 10
+  %p11 = getelementptr inbounds i8, i8* %i, i64 11
+  %p12 = getelementptr inbounds i8, i8* %i, i64 12
+  %p13 = getelementptr inbounds i8, i8* %i, i64 13
+  %p14 = getelementptr inbounds i8, i8* %i, i64 14
+  %p15 = getelementptr inbounds i8, i8* %i, i64 15
+  %i0  = load i8, i8* %p0 , align 4
+  %i1  = load i8, i8* %p1 , align 4
+  %i2  = load i8, i8* %p2 , align 4
+  %i3  = load i8, i8* %p3 , align 4
+  %i4  = load i8, i8* %p4 , align 4
+  %i5  = load i8, i8* %p5 , align 4
+  %i6  = load i8, i8* %p6 , align 4
+  %i7  = load i8, i8* %p7 , align 4
+  %i8  = load i8, i8* %p8 , align 4
+  %i9  = load i8, i8* %p9 , align 4
+  %i10 = load i8, i8* %p10, align 4
+  %i11 = load i8, i8* %p11, align 4
+  %i12 = load i8, i8* %p12, align 4
+  %i13 = load i8, i8* %p13, align 4
+  %i14 = load i8, i8* %p14, align 4
+  %i15 = load i8, i8* %p15, align 4
+  %x0  = extractelement <16 x i8> %x, i8 %i0
+  %x1  = extractelement <16 x i8> %x, i8 %i1
+  %x2  = extractelement <16 x i8> %x, i8 %i2
+  %x3  = extractelement <16 x i8> %x, i8 %i3
+  %x4  = extractelement <16 x i8> %x, i8 %i4
+  %x5  = extractelement <16 x i8> %x, i8 %i5
+  %x6  = extractelement <16 x i8> %x, i8 %i6
+  %x7  = extractelement <16 x i8> %x, i8 %i7
+  %x8  = extractelement <16 x i8> %x, i8 %i8
+  %x9  = extractelement <16 x i8> %x, i8 %i9
+  %x10 = extractelement <16 x i8> %x, i8 %i10
+  %x11 = extractelement <16 x i8> %x, i8 %i11
+  %x12 = extractelement <16 x i8> %x, i8 %i12
+  %x13 = extractelement <16 x i8> %x, i8 %i13
+  %x14 = extractelement <16 x i8> %x, i8 %i14
+  %x15 = extractelement <16 x i8> %x, i8 %i15
+  %r0  = insertelement <16 x i8> undef, i8 %x0 , i32 0
+  %r1  = insertelement <16 x i8>  %r0 , i8 %x1 , i32 1
+  %r2  = insertelement <16 x i8>  %r1 , i8 %x2 , i32 2
+  %r3  = insertelement <16 x i8>  %r2 , i8 %x3 , i32 3
+  %r4  = insertelement <16 x i8>  %r3 , i8 %x4 , i32 4
+  %r5  = insertelement <16 x i8>  %r4 , i8 %x5 , i32 5
+  %r6  = insertelement <16 x i8>  %r5 , i8 %x6 , i32 6
+  %r7  = insertelement <16 x i8>  %r6 , i8 %x7 , i32 7
+  %r8  = insertelement <16 x i8>  %r7 , i8 %x8 , i32 8
+  %r9  = insertelement <16 x i8>  %r8 , i8 %x9 , i32 9
+  %r10 = insertelement <16 x i8>  %r9 , i8 %x10, i32 10
+  %r11 = insertelement <16 x i8>  %r10, i8 %x11, i32 11
+  %r12 = insertelement <16 x i8>  %r11, i8 %x12, i32 12
+  %r13 = insertelement <16 x i8>  %r12, i8 %x13, i32 13
+  %r14 = insertelement <16 x i8>  %r13, i8 %x14, i32 14
+  %r15 = insertelement <16 x i8>  %r14, i8 %x15, i32 15
+  ret <16 x i8> %r15
+}
+
+;
+; Binary shuffle indices from registers
+;
+
+define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
+; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
+; SSE:       # BB#0:
+; SSE-NEXT:    movslq %edi, %rax
+; SSE-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT:    movslq %edx, %rdx
+; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT:    movslq %ecx, %rcx
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    movslq %edi, %rax
+; AVX-NEXT:    vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movslq %edx, %rdx
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movslq %ecx, %rcx
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 %i0
+  %x1 = extractelement <4 x float> %x, i32 %i1
+  %y2 = extractelement <4 x float> %y, i32 %i2
+  %x3 = extractelement <4 x float> %x, i32 %i3
+  %r0 = insertelement <4 x float> undef, float %x0, i32 0
+  %r1 = insertelement <4 x float>   %r0, float 0.0, i32 1
+  %r2 = insertelement <4 x float>   %r1, float %y2, i32 2
+  %r3 = insertelement <4 x float>   %r2, float %x3, i32 3
+  ret <4 x float> %r3
+}
+
+define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
+; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movswq %di, %r10
+; SSE2-NEXT:    movswq %si, %rsi
+; SSE2-NEXT:    movswq %dx, %r11
+; SSE2-NEXT:    movswq %cx, %rcx
+; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movswq %r8w, %rdi
+; SSE2-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movswq %r9w, %rax
+; SSE2-NEXT:    movzwl -24(%rsp,%rsi,2), %esi
+; SSE2-NEXT:    xorl %edx, %edx
+; SSE2-NEXT:    movd %edx, %xmm0
+; SSE2-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT:    movd %esi, %xmm2
+; SSE2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; SSE2-NEXT:    movd %eax, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT:    movzwl -40(%rsp,%r10,2), %eax
+; SSE2-NEXT:    movzwl -40(%rsp,%r11,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    movzwl -40(%rsp,%rdi,2), %eax
+; SSE2-NEXT:    movd %eax, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movswq %di, %r10
+; SSSE3-NEXT:    movswq %si, %rsi
+; SSSE3-NEXT:    movswq %dx, %r11
+; SSSE3-NEXT:    movswq %cx, %rcx
+; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movswq %r8w, %rdi
+; SSSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movswq %r9w, %rax
+; SSSE3-NEXT:    movzwl -24(%rsp,%rsi,2), %esi
+; SSSE3-NEXT:    xorl %edx, %edx
+; SSSE3-NEXT:    movd %edx, %xmm0
+; SSSE3-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm1
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT:    movd %esi, %xmm2
+; SSSE3-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; SSSE3-NEXT:    movd %eax, %xmm3
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT:    movzwl -40(%rsp,%r10,2), %eax
+; SSSE3-NEXT:    movzwl -40(%rsp,%r11,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm1
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    movzwl -40(%rsp,%rdi,2), %eax
+; SSSE3-NEXT:    movd %eax, %xmm3
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    movswq %di, %rax
+; SSE41-NEXT:    movswq %si, %rsi
+; SSE41-NEXT:    movswq %dx, %rdx
+; SSE41-NEXT:    movswq %cx, %r10
+; SSE41-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movswq %r8w, %rdi
+; SSE41-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE41-NEXT:    movswq %r9w, %rcx
+; SSE41-NEXT:    movzwl -40(%rsp,%rax,2), %eax
+; SSE41-NEXT:    movd %eax, %xmm1
+; SSE41-NEXT:    pinsrw $1, -24(%rsp,%rsi,2), %xmm1
+; SSE41-NEXT:    pinsrw $2, -40(%rsp,%rdx,2), %xmm1
+; SSE41-NEXT:    pinsrw $3, -24(%rsp,%r10,2), %xmm1
+; SSE41-NEXT:    pinsrw $4, -40(%rsp,%rdi,2), %xmm1
+; SSE41-NEXT:    pinsrw $5, -24(%rsp,%rcx,2), %xmm1
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
+; AVX:       # BB#0:
+; AVX-NEXT:    movswq %di, %r10
+; AVX-NEXT:    movswq %si, %r11
+; AVX-NEXT:    movswq %dx, %rdx
+; AVX-NEXT:    movswq %cx, %rcx
+; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movswq %r8w, %rdi
+; AVX-NEXT:    vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
+; AVX-NEXT:    movswq %r9w, %rax
+; AVX-NEXT:    movzwl -40(%rsp,%r10,2), %esi
+; AVX-NEXT:    vmovd %esi, %xmm0
+; AVX-NEXT:    vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $4, -40(%rsp,%rdi,2), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; AVX-NEXT:    retq
+  %x0 = extractelement <8 x i16> %x, i16 %i0
+  %y1 = extractelement <8 x i16> %y, i16 %i1
+  %x2 = extractelement <8 x i16> %x, i16 %i2
+  %y3 = extractelement <8 x i16> %y, i16 %i3
+  %x4 = extractelement <8 x i16> %x, i16 %i4
+  %y5 = extractelement <8 x i16> %y, i16 %i5
+  %x6 = extractelement <8 x i16> %x, i16 %i6
+  %x7 = extractelement <8 x i16> %x, i16 %i7
+  %r0 = insertelement <8 x i16> undef, i16 %x0, i32 0
+  %r1 = insertelement <8 x i16>   %r0, i16 %y1, i32 1
+  %r2 = insertelement <8 x i16>   %r1, i16 %x2, i32 2
+  %r3 = insertelement <8 x i16>   %r2, i16 %y3, i32 3
+  %r4 = insertelement <8 x i16>   %r3, i16 %x4, i32 4
+  %r5 = insertelement <8 x i16>   %r4, i16 %y5, i32 5
+  %r6 = insertelement <8 x i16>   %r5, i16   0, i32 6
+  %r7 = insertelement <8 x i16>   %r6, i16   0, i32 7
+  ret <8 x i16> %r7
+}

Added: llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll?rev=265045&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll Thu Mar 31 15:26:30 2016
@@ -0,0 +1,720 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+
+;
+; Unary shuffle indices from registers
+;
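+; Tests that spill a full ymm source need a 32-byte-aligned slot, hence the
+; rbp frame and the andq $-32, %rsp seen below; xmm-sized sources get by
+; with a fixed slot below %rsp and no frame setup.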
+
+define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
+; ALL-LABEL: var_shuffle_v4f64_v4f64_xxxx_i64:
+; ALL:       # BB#0:
+; ALL-NEXT:    pushq %rbp
+; ALL-NEXT:    movq %rsp, %rbp
+; ALL-NEXT:    andq $-32, %rsp
+; ALL-NEXT:    subq $64, %rsp
+; ALL-NEXT:    vmovaps %ymm0, (%rsp)
+; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; ALL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; ALL-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    movq %rbp, %rsp
+; ALL-NEXT:    popq %rbp
+; ALL-NEXT:    retq
+  %x0 = extractelement <4 x double> %x, i64 %i0
+  %x1 = extractelement <4 x double> %x, i64 %i1
+  %x2 = extractelement <4 x double> %x, i64 %i2
+  %x3 = extractelement <4 x double> %x, i64 %i3
+  %r0 = insertelement <4 x double> undef, double %x0, i32 0
+  %r1 = insertelement <4 x double>   %r0, double %x1, i32 1
+  %r2 = insertelement <4 x double>   %r1, double %x2, i32 2
+  %r3 = insertelement <4 x double>   %r2, double %x3, i32 3
+  ret <4 x double> %r3
+}
+
+define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
+; ALL-LABEL: var_shuffle_v4f64_v4f64_uxx0_i64:
+; ALL:       # BB#0:
+; ALL-NEXT:    pushq %rbp
+; ALL-NEXT:    movq %rsp, %rbp
+; ALL-NEXT:    andq $-32, %rsp
+; ALL-NEXT:    subq $64, %rsp
+; ALL-NEXT:    vmovaps %ymm0, (%rsp)
+; ALL-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; ALL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    movq %rbp, %rsp
+; ALL-NEXT:    popq %rbp
+; ALL-NEXT:    retq
+  %x0 = extractelement <4 x double> %x, i64 %i0
+  %x1 = extractelement <4 x double> %x, i64 %i1
+  %x2 = extractelement <4 x double> %x, i64 %i2
+  %x3 = extractelement <4 x double> %x, i64 %i3
+  %r0 = insertelement <4 x double> undef, double undef, i32 0
+  %r1 = insertelement <4 x double>   %r0, double   %x1, i32 1
+  %r2 = insertelement <4 x double>   %r1, double   %x2, i32 2
+  %r3 = insertelement <4 x double>   %r2, double   0.0, i32 3
+  ret <4 x double> %r3
+}
+
+define <4 x double> @var_shuffle_v4f64_v2f64_xxxx_i64(<2 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
+; ALL-LABEL: var_shuffle_v4f64_v2f64_xxxx_i64:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; ALL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; ALL-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    retq
+  %x0 = extractelement <2 x double> %x, i64 %i0
+  %x1 = extractelement <2 x double> %x, i64 %i1
+  %x2 = extractelement <2 x double> %x, i64 %i2
+  %x3 = extractelement <2 x double> %x, i64 %i3
+  %r0 = insertelement <4 x double> undef, double %x0, i32 0
+  %r1 = insertelement <4 x double>   %r0, double %x1, i32 1
+  %r2 = insertelement <4 x double>   %r1, double %x2, i32 2
+  %r3 = insertelement <4 x double>   %r2, double %x3, i32 3
+  ret <4 x double> %r3
+}
+
+define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
+; AVX1-LABEL: var_shuffle_v4i64_v4i64_xxxx_i64:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    pushq %rbp
+; AVX1-NEXT:    movq %rsp, %rbp
+; AVX1-NEXT:    andq $-32, %rsp
+; AVX1-NEXT:    subq $64, %rsp
+; AVX1-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    movq %rbp, %rsp
+; AVX1-NEXT:    popq %rbp
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_shuffle_v4i64_v4i64_xxxx_i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $64, %rsp
+; AVX2-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+  %x0 = extractelement <4 x i64> %x, i64 %i0
+  %x1 = extractelement <4 x i64> %x, i64 %i1
+  %x2 = extractelement <4 x i64> %x, i64 %i2
+  %x3 = extractelement <4 x i64> %x, i64 %i3
+  %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
+  %r1 = insertelement <4 x i64>   %r0, i64 %x1, i32 1
+  %r2 = insertelement <4 x i64>   %r1, i64 %x2, i32 2
+  %r3 = insertelement <4 x i64>   %r2, i64 %x3, i32 3
+  ret <4 x i64> %r3
+}
+
+define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
+; AVX1-LABEL: var_shuffle_v4i64_v4i64_xx00_i64:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    pushq %rbp
+; AVX1-NEXT:    movq %rsp, %rbp
+; AVX1-NEXT:    andq $-32, %rsp
+; AVX1-NEXT:    subq $64, %rsp
+; AVX1-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    movq %rbp, %rsp
+; AVX1-NEXT:    popq %rbp
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_shuffle_v4i64_v4i64_xx00_i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $64, %rsp
+; AVX2-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+  %x0 = extractelement <4 x i64> %x, i64 %i0
+  %x1 = extractelement <4 x i64> %x, i64 %i1
+  %x2 = extractelement <4 x i64> %x, i64 %i2
+  %x3 = extractelement <4 x i64> %x, i64 %i3
+  %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
+  %r1 = insertelement <4 x i64>   %r0, i64 %x1, i32 1
+  %r2 = insertelement <4 x i64>   %r1, i64   0, i32 2
+  %r3 = insertelement <4 x i64>   %r2, i64   0, i32 3
+  ret <4 x i64> %r3
+}
+
+define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
+; AVX1-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+  %x0 = extractelement <2 x i64> %x, i64 %i0
+  %x1 = extractelement <2 x i64> %x, i64 %i1
+  %x2 = extractelement <2 x i64> %x, i64 %i2
+  %x3 = extractelement <2 x i64> %x, i64 %i3
+  %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
+  %r1 = insertelement <4 x i64>   %r0, i64 %x1, i32 1
+  %r2 = insertelement <4 x i64>   %r1, i64 %x2, i32 2
+  %r3 = insertelement <4 x i64>   %r2, i64 %x3, i32 3
+  ret <4 x i64> %r3
+}
+
+define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
+; AVX1-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    pushq %rbp
+; AVX1-NEXT:    movq %rsp, %rbp
+; AVX1-NEXT:    andq $-32, %rsp
+; AVX1-NEXT:    subq $64, %rsp
+; AVX1-NEXT:    movslq %edi, %rax
+; AVX1-NEXT:    movslq %esi, %rsi
+; AVX1-NEXT:    movslq %edx, %rdx
+; AVX1-NEXT:    movslq %ecx, %r11
+; AVX1-NEXT:    movslq %r8d, %r10
+; AVX1-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX1-NEXT:    movslq %r9d, %r8
+; AVX1-NEXT:    movslq 16(%rbp), %rdi
+; AVX1-NEXT:    movslq 24(%rbp), %rcx
+; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
+; AVX1-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    movq %rbp, %rsp
+; AVX1-NEXT:    popq %rbp
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovd %edi, %xmm1
+; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vmovd %esi, %xmm2
+; AVX2-NEXT:    vpermps %ymm0, %ymm2, %ymm2
+; AVX2-NEXT:    vmovd %edx, %xmm3
+; AVX2-NEXT:    vpermps %ymm0, %ymm3, %ymm3
+; AVX2-NEXT:    vmovd %ecx, %xmm4
+; AVX2-NEXT:    vpermps %ymm0, %ymm4, %ymm4
+; AVX2-NEXT:    vmovd %r8d, %xmm5
+; AVX2-NEXT:    vpermps %ymm0, %ymm5, %ymm5
+; AVX2-NEXT:    vmovd %r9d, %xmm6
+; AVX2-NEXT:    vpermps %ymm0, %ymm6, %ymm6
+; AVX2-NEXT:    vmovd {{.*#+}} xmm7 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpermps %ymm0, %ymm7, %ymm7
+; AVX2-NEXT:    vmovd {{.*#+}} xmm8 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vpermps %ymm0, %ymm8, %ymm0
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm5[0,1,2],xmm0[0]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+  %x0 = extractelement <8 x float> %x, i32 %i0
+  %x1 = extractelement <8 x float> %x, i32 %i1
+  %x2 = extractelement <8 x float> %x, i32 %i2
+  %x3 = extractelement <8 x float> %x, i32 %i3
+  %x4 = extractelement <8 x float> %x, i32 %i4
+  %x5 = extractelement <8 x float> %x, i32 %i5
+  %x6 = extractelement <8 x float> %x, i32 %i6
+  %x7 = extractelement <8 x float> %x, i32 %i7
+  %r0 = insertelement <8 x float> undef, float %x0, i32 0
+  %r1 = insertelement <8 x float>   %r0, float %x1, i32 1
+  %r2 = insertelement <8 x float>   %r1, float %x2, i32 2
+  %r3 = insertelement <8 x float>   %r2, float %x3, i32 3
+  %r4 = insertelement <8 x float>   %r3, float %x4, i32 4
+  %r5 = insertelement <8 x float>   %r4, float %x5, i32 5
+  %r6 = insertelement <8 x float>   %r5, float %x6, i32 6
+  %r7 = insertelement <8 x float>   %r6, float %x7, i32 7
+  ret <8 x float> %r7
+}
+
+define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
+; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
+; ALL:       # BB#0:
+; ALL-NEXT:    movslq %edi, %rax
+; ALL-NEXT:    movslq %esi, %rsi
+; ALL-NEXT:    movslq %edx, %rdx
+; ALL-NEXT:    movslq %ecx, %r11
+; ALL-NEXT:    movslq %r8d, %r10
+; ALL-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    movslq %r9d, %r8
+; ALL-NEXT:    movslq {{[0-9]+}}(%rsp), %rdi
+; ALL-NEXT:    movslq {{[0-9]+}}(%rsp), %rcx
+; ALL-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; ALL-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; ALL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
+; ALL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
+; ALL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
+; ALL-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; ALL-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
+; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3]
+; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; ALL-NEXT:    retq
+  %x0 = extractelement <4 x float> %x, i32 %i0
+  %x1 = extractelement <4 x float> %x, i32 %i1
+  %x2 = extractelement <4 x float> %x, i32 %i2
+  %x3 = extractelement <4 x float> %x, i32 %i3
+  %x4 = extractelement <4 x float> %x, i32 %i4
+  %x5 = extractelement <4 x float> %x, i32 %i5
+  %x6 = extractelement <4 x float> %x, i32 %i6
+  %x7 = extractelement <4 x float> %x, i32 %i7
+  %r0 = insertelement <8 x float> undef, float %x0, i32 0
+  %r1 = insertelement <8 x float>   %r0, float %x1, i32 1
+  %r2 = insertelement <8 x float>   %r1, float %x2, i32 2
+  %r3 = insertelement <8 x float>   %r2, float %x3, i32 3
+  %r4 = insertelement <8 x float>   %r3, float %x4, i32 4
+  %r5 = insertelement <8 x float>   %r4, float %x5, i32 5
+  %r6 = insertelement <8 x float>   %r5, float %x6, i32 6
+  %r7 = insertelement <8 x float>   %r6, float %x7, i32 7
+  ret <8 x float> %r7
+}
+
+define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
+; AVX1-LABEL: var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    pushq %rbp
+; AVX1-NEXT:    movq %rsp, %rbp
+; AVX1-NEXT:    andq $-32, %rsp
+; AVX1-NEXT:    subq $64, %rsp
+; AVX1-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX1-NEXT:    movslq 32(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    movslq 40(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq 48(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq 56(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq 64(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq 72(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq 80(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq 88(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq %edi, %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    movslq %esi, %rax
+; AVX1-NEXT:    vpinsrw $1, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq %edx, %rax
+; AVX1-NEXT:    vpinsrw $2, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq %ecx, %rax
+; AVX1-NEXT:    vpinsrw $3, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq %r8d, %rax
+; AVX1-NEXT:    vpinsrw $4, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq %r9d, %rax
+; AVX1-NEXT:    vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq 16(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    movslq 24(%rbp), %rax
+; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    movq %rbp, %rsp
+; AVX1-NEXT:    popq %rbp
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $64, %rsp
+; AVX2-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX2-NEXT:    movslq 32(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    movslq 40(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq 48(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq 56(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq 64(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq 72(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq 80(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq 88(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq %edi, %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    movslq %esi, %rax
+; AVX2-NEXT:    vpinsrw $1, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq %edx, %rax
+; AVX2-NEXT:    vpinsrw $2, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq %ecx, %rax
+; AVX2-NEXT:    vpinsrw $3, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq %r8d, %rax
+; AVX2-NEXT:    vpinsrw $4, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq %r9d, %rax
+; AVX2-NEXT:    vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq 16(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    movslq 24(%rbp), %rax
+; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+  %x0  = extractelement <16 x i16> %x, i32 %i0
+  %x1  = extractelement <16 x i16> %x, i32 %i1
+  %x2  = extractelement <16 x i16> %x, i32 %i2
+  %x3  = extractelement <16 x i16> %x, i32 %i3
+  %x4  = extractelement <16 x i16> %x, i32 %i4
+  %x5  = extractelement <16 x i16> %x, i32 %i5
+  %x6  = extractelement <16 x i16> %x, i32 %i6
+  %x7  = extractelement <16 x i16> %x, i32 %i7
+  %x8  = extractelement <16 x i16> %x, i32 %i8
+  %x9  = extractelement <16 x i16> %x, i32 %i9
+  %x10 = extractelement <16 x i16> %x, i32 %i10
+  %x11 = extractelement <16 x i16> %x, i32 %i11
+  %x12 = extractelement <16 x i16> %x, i32 %i12
+  %x13 = extractelement <16 x i16> %x, i32 %i13
+  %x14 = extractelement <16 x i16> %x, i32 %i14
+  %x15 = extractelement <16 x i16> %x, i32 %i15
+  %r0  = insertelement <16 x i16> undef, i16 %x0 , i32 0
+  %r1  = insertelement <16 x i16>  %r0 , i16 %x1 , i32 1
+  %r2  = insertelement <16 x i16>  %r1 , i16 %x2 , i32 2
+  %r3  = insertelement <16 x i16>  %r2 , i16 %x3 , i32 3
+  %r4  = insertelement <16 x i16>  %r3 , i16 %x4 , i32 4
+  %r5  = insertelement <16 x i16>  %r4 , i16 %x5 , i32 5
+  %r6  = insertelement <16 x i16>  %r5 , i16 %x6 , i32 6
+  %r7  = insertelement <16 x i16>  %r6 , i16 %x7 , i32 7
+  %r8  = insertelement <16 x i16>  %r7 , i16 %x8 , i32 8
+  %r9  = insertelement <16 x i16>  %r8 , i16 %x9 , i32 9
+  %r10 = insertelement <16 x i16>  %r9 , i16 %x10, i32 10
+  %r11 = insertelement <16 x i16>  %r10, i16 %x11, i32 11
+  %r12 = insertelement <16 x i16>  %r11, i16 %x12, i32 12
+  %r13 = insertelement <16 x i16>  %r12, i16 %x13, i32 13
+  %r14 = insertelement <16 x i16>  %r13, i16 %x14, i32 14
+  %r15 = insertelement <16 x i16>  %r14, i16 %x15, i32 15
+  ret <16 x i16> %r15
+}
+
+define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
+; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    movslq %edi, %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    movslq %esi, %rax
+; AVX1-NEXT:    vpinsrw $1, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq %edx, %rax
+; AVX1-NEXT:    vpinsrw $2, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq %ecx, %rax
+; AVX1-NEXT:    vpinsrw $3, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq %r8d, %rax
+; AVX1-NEXT:    vpinsrw $4, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq %r9d, %rax
+; AVX1-NEXT:    vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    movslq %edi, %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    movslq %esi, %rax
+; AVX2-NEXT:    vpinsrw $1, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq %edx, %rax
+; AVX2-NEXT:    vpinsrw $2, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq %ecx, %rax
+; AVX2-NEXT:    vpinsrw $3, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq %r8d, %rax
+; AVX2-NEXT:    vpinsrw $4, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq %r9d, %rax
+; AVX2-NEXT:    vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+  %x0  = extractelement <8 x i16> %x, i32 %i0
+  %x1  = extractelement <8 x i16> %x, i32 %i1
+  %x2  = extractelement <8 x i16> %x, i32 %i2
+  %x3  = extractelement <8 x i16> %x, i32 %i3
+  %x4  = extractelement <8 x i16> %x, i32 %i4
+  %x5  = extractelement <8 x i16> %x, i32 %i5
+  %x6  = extractelement <8 x i16> %x, i32 %i6
+  %x7  = extractelement <8 x i16> %x, i32 %i7
+  %x8  = extractelement <8 x i16> %x, i32 %i8
+  %x9  = extractelement <8 x i16> %x, i32 %i9
+  %x10 = extractelement <8 x i16> %x, i32 %i10
+  %x11 = extractelement <8 x i16> %x, i32 %i11
+  %x12 = extractelement <8 x i16> %x, i32 %i12
+  %x13 = extractelement <8 x i16> %x, i32 %i13
+  %x14 = extractelement <8 x i16> %x, i32 %i14
+  %x15 = extractelement <8 x i16> %x, i32 %i15
+  %r0  = insertelement <16 x i16> undef, i16 %x0 , i32 0
+  %r1  = insertelement <16 x i16>  %r0 , i16 %x1 , i32 1
+  %r2  = insertelement <16 x i16>  %r1 , i16 %x2 , i32 2
+  %r3  = insertelement <16 x i16>  %r2 , i16 %x3 , i32 3
+  %r4  = insertelement <16 x i16>  %r3 , i16 %x4 , i32 4
+  %r5  = insertelement <16 x i16>  %r4 , i16 %x5 , i32 5
+  %r6  = insertelement <16 x i16>  %r5 , i16 %x6 , i32 6
+  %r7  = insertelement <16 x i16>  %r6 , i16 %x7 , i32 7
+  %r8  = insertelement <16 x i16>  %r7 , i16 %x8 , i32 8
+  %r9  = insertelement <16 x i16>  %r8 , i16 %x9 , i32 9
+  %r10 = insertelement <16 x i16>  %r9 , i16 %x10, i32 10
+  %r11 = insertelement <16 x i16>  %r10, i16 %x11, i32 11
+  %r12 = insertelement <16 x i16>  %r11, i16 %x12, i32 12
+  %r13 = insertelement <16 x i16>  %r12, i16 %x13, i32 13
+  %r14 = insertelement <16 x i16>  %r13, i16 %x14, i32 14
+  %r15 = insertelement <16 x i16>  %r14, i16 %x15, i32 15
+  ret <16 x i16> %r15
+}
+
+;
+; Unary shuffle indices from memory
+;
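+; Same pattern as the register cases above, except the indices are first
+; loaded from the i64* argument into GPRs before being used to address the
+; spilled vector.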
+
+define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwind {
+; AVX1-LABEL: mem_shuffle_v4i64_v4i64_xxxx_i64:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    pushq %rbp
+; AVX1-NEXT:    movq %rsp, %rbp
+; AVX1-NEXT:    andq $-32, %rsp
+; AVX1-NEXT:    subq $64, %rsp
+; AVX1-NEXT:    movq (%rdi), %rax
+; AVX1-NEXT:    movq 8(%rdi), %rcx
+; AVX1-NEXT:    movq 16(%rdi), %rdx
+; AVX1-NEXT:    movq 24(%rdi), %rsi
+; AVX1-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    movq %rbp, %rsp
+; AVX1-NEXT:    popq %rbp
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: mem_shuffle_v4i64_v4i64_xxxx_i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $64, %rsp
+; AVX2-NEXT:    movq (%rdi), %rax
+; AVX2-NEXT:    movq 8(%rdi), %rcx
+; AVX2-NEXT:    movq 16(%rdi), %rdx
+; AVX2-NEXT:    movq 24(%rdi), %rsi
+; AVX2-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    retq
+  %p0  = getelementptr inbounds i64, i64* %i, i32 0
+  %p1  = getelementptr inbounds i64, i64* %i, i32 1
+  %p2  = getelementptr inbounds i64, i64* %i, i32 2
+  %p3  = getelementptr inbounds i64, i64* %i, i32 3
+  %i0  = load i64, i64* %p0, align 4
+  %i1  = load i64, i64* %p1, align 4
+  %i2  = load i64, i64* %p2, align 4
+  %i3  = load i64, i64* %p3, align 4
+  %x0 = extractelement <4 x i64> %x, i64 %i0
+  %x1 = extractelement <4 x i64> %x, i64 %i1
+  %x2 = extractelement <4 x i64> %x, i64 %i2
+  %x3 = extractelement <4 x i64> %x, i64 %i3
+  %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
+  %r1 = insertelement <4 x i64>   %r0, i64 %x1, i32 1
+  %r2 = insertelement <4 x i64>   %r1, i64 %x2, i32 2
+  %r3 = insertelement <4 x i64>   %r2, i64 %x3, i32 3
+  ret <4 x i64> %r3
+}
+
+define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwind {
+; AVX1-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    movq (%rdi), %rax
+; AVX1-NEXT:    movq 8(%rdi), %rcx
+; AVX1-NEXT:    movq 16(%rdi), %rdx
+; AVX1-NEXT:    movq 24(%rdi), %rsi
+; AVX1-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    movq (%rdi), %rax
+; AVX2-NEXT:    movq 8(%rdi), %rcx
+; AVX2-NEXT:    movq 16(%rdi), %rdx
+; AVX2-NEXT:    movq 24(%rdi), %rsi
+; AVX2-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+  %p0  = getelementptr inbounds i64, i64* %i, i32 0
+  %p1  = getelementptr inbounds i64, i64* %i, i32 1
+  %p2  = getelementptr inbounds i64, i64* %i, i32 2
+  %p3  = getelementptr inbounds i64, i64* %i, i32 3
+  %i0  = load i64, i64* %p0, align 4
+  %i1  = load i64, i64* %p1, align 4
+  %i2  = load i64, i64* %p2, align 4
+  %i3  = load i64, i64* %p3, align 4
+  %x0 = extractelement <2 x i64> %x, i64 %i0
+  %x1 = extractelement <2 x i64> %x, i64 %i1
+  %x2 = extractelement <2 x i64> %x, i64 %i2
+  %x3 = extractelement <2 x i64> %x, i64 %i3
+  %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
+  %r1 = insertelement <4 x i64>   %r0, i64 %x1, i32 1
+  %r2 = insertelement <4 x i64>   %r1, i64 %x2, i32 2
+  %r3 = insertelement <4 x i64>   %r2, i64 %x3, i32 3
+  ret <4 x i64> %r3
+}
