[llvm] b4218a1 - [NFC][X86][Codegen] Add test coverage for interleaved i64 load/store stride=6

Roman Lebedev via llvm-commits <llvm-commits at lists.llvm.org>
Mon Oct 4 10:58:16 PDT 2021


Author: Roman Lebedev
Date: 2021-10-04T20:57:36+03:00
New Revision: b4218a1451d388b58fb26539198f0f8419c2b700

URL: https://github.com/llvm/llvm-project/commit/b4218a1451d388b58fb26539198f0f8419c2b700
DIFF: https://github.com/llvm/llvm-project/commit/b4218a1451d388b58fb26539198f0f8419c2b700.diff

LOG: [NFC][X86][Codegen] Add test coverage for interleaved i64 load/store stride=6

Added: 
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
new file mode 100644
index 000000000000..f2a8343272b3
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
@@ -0,0 +1,679 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512
+
+; These patterns are produced by LoopVectorizer for interleaved loads.
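+;
+; For illustration only (an assumption, not part of this commit): a scalar
+; source loop of roughly the following shape is the kind of input whose
+; interleaved-access vectorization yields the wide load followed by six
+; stride-6 de-interleaving shufflevectors exercised below.
+;
+;   // hypothetical C source
+;   struct S { long long f0, f1, f2, f3, f4, f5; }; // six i64 fields => stride 6
+;   void deinterleave(struct S *in, long long *o0, long long *o1, long long *o2,
+;                     long long *o3, long long *o4, long long *o5, int n) {
+;     for (int i = 0; i < n; ++i) {
+;       o0[i] = in[i].f0; o1[i] = in[i].f1; o2[i] = in[i].f2;
+;       o3[i] = in[i].f3; o4[i] = in[i].f4; o5[i] = in[i].f5;
+;     }
+;   }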
+
+define void @load_i64_stride6_vf2(<12 x i64>* %in.vec, <2 x i64>* %out.vec0, <2 x i64>* %out.vec1, <2 x i64>* %out.vec2, <2 x i64>* %out.vec3, <2 x i64>* %out.vec4, <2 x i64>* %out.vec5) nounwind {
+; SSE-LABEL: load_i64_stride6_vf2:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT:    movaps 80(%rdi), %xmm0
+; SSE-NEXT:    movaps 64(%rdi), %xmm1
+; SSE-NEXT:    movaps (%rdi), %xmm2
+; SSE-NEXT:    movaps 16(%rdi), %xmm3
+; SSE-NEXT:    movaps 32(%rdi), %xmm4
+; SSE-NEXT:    movaps 48(%rdi), %xmm5
+; SSE-NEXT:    movaps %xmm2, %xmm6
+; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm5[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm5[1]
+; SSE-NEXT:    movaps %xmm3, %xmm5
+; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm1[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; SSE-NEXT:    movaps %xmm4, %xmm1
+; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
+; SSE-NEXT:    movaps %xmm6, (%rsi)
+; SSE-NEXT:    movaps %xmm2, (%rdx)
+; SSE-NEXT:    movaps %xmm5, (%rcx)
+; SSE-NEXT:    movaps %xmm3, (%r8)
+; SSE-NEXT:    movaps %xmm1, (%r9)
+; SSE-NEXT:    movaps %xmm4, (%rax)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: load_i64_stride6_vf2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT:    vmovaps (%rdi), %xmm0
+; AVX-NEXT:    vmovaps 16(%rdi), %xmm1
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm2
+; AVX-NEXT:    vmovaps 48(%rdi), %xmm3
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm3[0]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
+; AVX-NEXT:    vmovaps 64(%rdi), %xmm3
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm1[0],xmm3[0]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
+; AVX-NEXT:    vmovaps 80(%rdi), %xmm3
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm2[0],xmm3[0]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX-NEXT:    vmovaps %xmm4, (%rsi)
+; AVX-NEXT:    vmovaps %xmm0, (%rdx)
+; AVX-NEXT:    vmovaps %xmm5, (%rcx)
+; AVX-NEXT:    vmovaps %xmm1, (%r8)
+; AVX-NEXT:    vmovaps %xmm6, (%r9)
+; AVX-NEXT:    vmovaps %xmm2, (%rax)
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: load_i64_stride6_vf2:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    vmovaps (%rdi), %xmm0
+; AVX512-NEXT:    vmovaps 16(%rdi), %xmm1
+; AVX512-NEXT:    vmovaps 32(%rdi), %xmm2
+; AVX512-NEXT:    vmovaps 48(%rdi), %xmm3
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm3[0]
+; AVX512-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
+; AVX512-NEXT:    vmovaps 64(%rdi), %xmm3
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm1[0],xmm3[0]
+; AVX512-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
+; AVX512-NEXT:    vmovaps 80(%rdi), %xmm3
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm2[0],xmm3[0]
+; AVX512-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX512-NEXT:    vmovaps %xmm4, (%rsi)
+; AVX512-NEXT:    vmovaps %xmm0, (%rdx)
+; AVX512-NEXT:    vmovaps %xmm5, (%rcx)
+; AVX512-NEXT:    vmovaps %xmm1, (%r8)
+; AVX512-NEXT:    vmovaps %xmm6, (%r9)
+; AVX512-NEXT:    vmovaps %xmm2, (%rax)
+; AVX512-NEXT:    retq
+  %wide.vec = load <12 x i64>, <12 x i64>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <2 x i32> <i32 0, i32 6>
+  %strided.vec1 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <2 x i32> <i32 1, i32 7>
+  %strided.vec2 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <2 x i32> <i32 2, i32 8>
+  %strided.vec3 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <2 x i32> <i32 3, i32 9>
+  %strided.vec4 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <2 x i32> <i32 4, i32 10>
+  %strided.vec5 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <2 x i32> <i32 5, i32 11>
+
+  store <2 x i64> %strided.vec0, <2 x i64>* %out.vec0, align 32
+  store <2 x i64> %strided.vec1, <2 x i64>* %out.vec1, align 32
+  store <2 x i64> %strided.vec2, <2 x i64>* %out.vec2, align 32
+  store <2 x i64> %strided.vec3, <2 x i64>* %out.vec3, align 32
+  store <2 x i64> %strided.vec4, <2 x i64>* %out.vec4, align 32
+  store <2 x i64> %strided.vec5, <2 x i64>* %out.vec5, align 32
+
+  ret void
+}
+
+define void @load_i64_stride6_vf4(<24 x i64>* %in.vec, <4 x i64>* %out.vec0, <4 x i64>* %out.vec1, <4 x i64>* %out.vec2, <4 x i64>* %out.vec3, <4 x i64>* %out.vec4, <4 x i64>* %out.vec5) nounwind {
+; SSE-LABEL: load_i64_stride6_vf4:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT:    movaps 176(%rdi), %xmm8
+; SSE-NEXT:    movaps 128(%rdi), %xmm12
+; SSE-NEXT:    movaps 80(%rdi), %xmm9
+; SSE-NEXT:    movaps 160(%rdi), %xmm10
+; SSE-NEXT:    movaps 112(%rdi), %xmm14
+; SSE-NEXT:    movaps 64(%rdi), %xmm11
+; SSE-NEXT:    movaps (%rdi), %xmm5
+; SSE-NEXT:    movaps 16(%rdi), %xmm3
+; SSE-NEXT:    movaps 32(%rdi), %xmm13
+; SSE-NEXT:    movaps 48(%rdi), %xmm6
+; SSE-NEXT:    movaps 144(%rdi), %xmm7
+; SSE-NEXT:    movaps 96(%rdi), %xmm4
+; SSE-NEXT:    movaps %xmm4, %xmm15
+; SSE-NEXT:    movlhps {{.*#+}} xmm15 = xmm15[0],xmm7[0]
+; SSE-NEXT:    movaps %xmm5, %xmm1
+; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm6[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm7[1]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm6[1]
+; SSE-NEXT:    movaps %xmm3, %xmm6
+; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm11[0]
+; SSE-NEXT:    movaps %xmm14, %xmm7
+; SSE-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm10[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm11[1]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm10[1]
+; SSE-NEXT:    movaps %xmm13, %xmm2
+; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm9[0]
+; SSE-NEXT:    movaps %xmm12, %xmm0
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm8[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm9[1]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm8[1]
+; SSE-NEXT:    movaps %xmm15, 16(%rsi)
+; SSE-NEXT:    movaps %xmm1, (%rsi)
+; SSE-NEXT:    movaps %xmm4, 16(%rdx)
+; SSE-NEXT:    movaps %xmm5, (%rdx)
+; SSE-NEXT:    movaps %xmm7, 16(%rcx)
+; SSE-NEXT:    movaps %xmm6, (%rcx)
+; SSE-NEXT:    movaps %xmm14, 16(%r8)
+; SSE-NEXT:    movaps %xmm3, (%r8)
+; SSE-NEXT:    movaps %xmm0, 16(%r9)
+; SSE-NEXT:    movaps %xmm2, (%r9)
+; SSE-NEXT:    movaps %xmm12, 16(%rax)
+; SSE-NEXT:    movaps %xmm13, (%rax)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: load_i64_stride6_vf4:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    vmovaps 160(%rdi), %ymm8
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm1
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm2
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm0, %ymm3
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX1-NEXT:    vmovaps (%rdi), %xmm5
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm6
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm7
+; AVX1-NEXT:    vmovaps 48(%rdi), %xmm0
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm5[0],xmm0[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm5[1],xmm0[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, 160(%rdi), %ymm0, %ymm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
+; AVX1-NEXT:    vmovaps 64(%rdi), %xmm5
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm6[0],xmm5[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm6[1],xmm5[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, 128(%rdi), %ymm0, %ymm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm2[0],ymm8[0],ymm2[2],ymm8[2]
+; AVX1-NEXT:    vmovaps 80(%rdi), %xmm6
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm7[0],xmm6[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm8[1],ymm2[3],ymm8[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm6[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vmovaps %ymm4, (%rsi)
+; AVX1-NEXT:    vmovaps %ymm0, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm3, (%rcx)
+; AVX1-NEXT:    vmovaps %ymm1, (%r8)
+; AVX1-NEXT:    vmovaps %ymm5, (%r9)
+; AVX1-NEXT:    vmovaps %ymm2, (%rax)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: load_i64_stride6_vf4:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    vmovaps 160(%rdi), %ymm0
+; AVX2-NEXT:    vmovaps 128(%rdi), %ymm1
+; AVX2-NEXT:    vmovaps 96(%rdi), %ymm2
+; AVX2-NEXT:    vmovaps (%rdi), %xmm3
+; AVX2-NEXT:    vmovaps 16(%rdi), %xmm4
+; AVX2-NEXT:    vmovaps 32(%rdi), %xmm5
+; AVX2-NEXT:    vmovaps 48(%rdi), %xmm6
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm3[0],xmm6[0]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm8 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,1,0,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 104(%rdi), %ymm8
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm6[1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 160(%rdi), %ymm6
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm6[0],ymm2[2],ymm6[2]
+; AVX2-NEXT:    vmovaps 64(%rdi), %xmm7
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm4[0],xmm7[0]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm7[1]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm0[1],ymm2[3],ymm0[3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-NEXT:    vmovaps 80(%rdi), %xmm4
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm5[0],xmm4[0]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 136(%rdi), %ymm7
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm7[1],ymm0[1],ymm7[3],ymm0[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm4[1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vmovaps %ymm9, (%rsi)
+; AVX2-NEXT:    vmovaps %ymm3, (%rdx)
+; AVX2-NEXT:    vmovaps %ymm6, (%rcx)
+; AVX2-NEXT:    vmovaps %ymm2, (%r8)
+; AVX2-NEXT:    vmovaps %ymm1, (%r9)
+; AVX2-NEXT:    vmovaps %ymm0, (%rax)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: load_i64_stride6_vf4:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm2
+; AVX512-NEXT:    vmovdqu64 64(%rdi), %zmm3
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,6,12,u>
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm0
+; AVX512-NEXT:    vpbroadcastq 144(%rdi), %ymm1
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = <1,7,13,u>
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm1
+; AVX512-NEXT:    vmovdqa 128(%rdi), %ymm4
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = <10,0,6,u>
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
+; AVX512-NEXT:    vpbroadcastq 160(%rdi), %ymm6
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
+; AVX512-NEXT:    vinserti128 $1, 160(%rdi), %ymm0, %ymm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm7 = <11,1,7,u>
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm7
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7]
+; AVX512-NEXT:    vmovdqa 160(%rdi), %ymm7
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm4[0],ymm7[0],ymm4[2],ymm7[2]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,10]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5,6,7]
+; AVX512-NEXT:    vpbroadcastq 136(%rdi), %ymm8
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} ymm7 = ymm8[1],ymm7[1],ymm8[3],ymm7[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm8 = [5,11]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX512-NEXT:    vmovdqa %ymm0, (%rsi)
+; AVX512-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX512-NEXT:    vmovdqa %ymm5, (%rcx)
+; AVX512-NEXT:    vmovdqa %ymm6, (%r8)
+; AVX512-NEXT:    vmovdqa %ymm4, (%r9)
+; AVX512-NEXT:    vmovdqa %ymm2, (%rax)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %wide.vec = load <24 x i64>, <24 x i64>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <4 x i32> <i32 0, i32 6, i32 12, i32 18>
+  %strided.vec1 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <4 x i32> <i32 1, i32 7, i32 13, i32 19>
+  %strided.vec2 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <4 x i32> <i32 2, i32 8, i32 14, i32 20>
+  %strided.vec3 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <4 x i32> <i32 3, i32 9, i32 15, i32 21>
+  %strided.vec4 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <4 x i32> <i32 4, i32 10, i32 16, i32 22>
+  %strided.vec5 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <4 x i32> <i32 5, i32 11, i32 17, i32 23>
+
+  store <4 x i64> %strided.vec0, <4 x i64>* %out.vec0, align 32
+  store <4 x i64> %strided.vec1, <4 x i64>* %out.vec1, align 32
+  store <4 x i64> %strided.vec2, <4 x i64>* %out.vec2, align 32
+  store <4 x i64> %strided.vec3, <4 x i64>* %out.vec3, align 32
+  store <4 x i64> %strided.vec4, <4 x i64>* %out.vec4, align 32
+  store <4 x i64> %strided.vec5, <4 x i64>* %out.vec5, align 32
+
+  ret void
+}
+
+define void @load_i64_stride6_vf8(<48 x i64>* %in.vec, <8 x i64>* %out.vec0, <8 x i64>* %out.vec1, <8 x i64>* %out.vec2, <8 x i64>* %out.vec3, <8 x i64>* %out.vec4, <8 x i64>* %out.vec5) nounwind {
+; SSE-LABEL: load_i64_stride6_vf8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    subq $40, %rsp
+; SSE-NEXT:    movaps 160(%rdi), %xmm8
+; SSE-NEXT:    movaps 112(%rdi), %xmm0
+; SSE-NEXT:    movaps 352(%rdi), %xmm1
+; SSE-NEXT:    movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE-NEXT:    movaps 256(%rdi), %xmm12
+; SSE-NEXT:    movaps 208(%rdi), %xmm9
+; SSE-NEXT:    movaps 64(%rdi), %xmm7
+; SSE-NEXT:    movaps (%rdi), %xmm11
+; SSE-NEXT:    movaps 16(%rdi), %xmm10
+; SSE-NEXT:    movaps 48(%rdi), %xmm2
+; SSE-NEXT:    movaps 336(%rdi), %xmm3
+; SSE-NEXT:    movaps 288(%rdi), %xmm14
+; SSE-NEXT:    movaps 144(%rdi), %xmm4
+; SSE-NEXT:    movaps 96(%rdi), %xmm5
+; SSE-NEXT:    movaps 240(%rdi), %xmm1
+; SSE-NEXT:    movaps 192(%rdi), %xmm13
+; SSE-NEXT:    movaps %xmm13, %xmm6
+; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm1[0]
+; SSE-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm1[1]
+; SSE-NEXT:    movaps %xmm5, %xmm1
+; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm4[0]
+; SSE-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
+; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm14, %xmm1
+; SSE-NEXT:    movlhps {{.*#+}} xmm14 = xmm14[0],xmm3[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
+; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm11, %xmm15
+; SSE-NEXT:    movlhps {{.*#+}} xmm15 = xmm15[0],xmm2[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm2[1]
+; SSE-NEXT:    movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm10, %xmm2
+; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0]
+; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm7[1]
+; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm9, %xmm11
+; SSE-NEXT:    movlhps {{.*#+}} xmm11 = xmm11[0],xmm12[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm12[1]
+; SSE-NEXT:    movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm0, %xmm12
+; SSE-NEXT:    movlhps {{.*#+}} xmm12 = xmm12[0],xmm8[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1]
+; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 304(%rdi), %xmm8
+; SSE-NEXT:    movaps %xmm8, %xmm9
+; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movlhps {{.*#+}} xmm9 = xmm9[0],xmm0[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm0[1]
+; SSE-NEXT:    movaps 80(%rdi), %xmm1
+; SSE-NEXT:    movaps 32(%rdi), %xmm7
+; SSE-NEXT:    movaps %xmm7, %xmm10
+; SSE-NEXT:    movlhps {{.*#+}} xmm10 = xmm10[0],xmm1[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm1[1]
+; SSE-NEXT:    movaps 272(%rdi), %xmm1
+; SSE-NEXT:    movaps 224(%rdi), %xmm3
+; SSE-NEXT:    movaps %xmm3, %xmm6
+; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm1[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; SSE-NEXT:    movaps 176(%rdi), %xmm1
+; SSE-NEXT:    movaps 128(%rdi), %xmm4
+; SSE-NEXT:    movaps %xmm4, %xmm5
+; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm1[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
+; SSE-NEXT:    movaps 368(%rdi), %xmm1
+; SSE-NEXT:    movaps 320(%rdi), %xmm0
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT:    movaps %xmm14, 48(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, 16(%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, 32(%rsi)
+; SSE-NEXT:    movaps %xmm15, (%rsi)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, 48(%rdx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, 16(%rdx)
+; SSE-NEXT:    movaps %xmm13, 32(%rdx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, (%rdx)
+; SSE-NEXT:    movaps %xmm12, 16(%rcx)
+; SSE-NEXT:    movaps %xmm9, 48(%rcx)
+; SSE-NEXT:    movaps %xmm11, 32(%rcx)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, (%rcx)
+; SSE-NEXT:    movaps %xmm8, 48(%r8)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, 16(%r8)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, 32(%r8)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm1, (%r8)
+; SSE-NEXT:    movaps %xmm2, 48(%r9)
+; SSE-NEXT:    movaps %xmm5, 16(%r9)
+; SSE-NEXT:    movaps %xmm6, 32(%r9)
+; SSE-NEXT:    movaps %xmm10, (%r9)
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT:    movaps %xmm0, 48(%rax)
+; SSE-NEXT:    movaps %xmm4, 16(%rax)
+; SSE-NEXT:    movaps %xmm3, 32(%rax)
+; SSE-NEXT:    movaps %xmm7, (%rax)
+; SSE-NEXT:    addq $40, %rsp
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: load_i64_stride6_vf8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovaps 352(%rdi), %ymm10
+; AVX1-NEXT:    vmovaps 288(%rdi), %ymm12
+; AVX1-NEXT:    vmovaps 96(%rdi), %ymm13
+; AVX1-NEXT:    vmovaps 128(%rdi), %ymm3
+; AVX1-NEXT:    vmovaps 320(%rdi), %ymm5
+; AVX1-NEXT:    vinsertf128 $1, 288(%rdi), %ymm0, %ymm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
+; AVX1-NEXT:    vmovaps 240(%rdi), %xmm2
+; AVX1-NEXT:    vmovaps 192(%rdi), %xmm0
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm2[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm0, %ymm8
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm8[0],ymm3[0],ymm8[2],ymm3[2]
+; AVX1-NEXT:    vmovaps (%rdi), %xmm1
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm4
+; AVX1-NEXT:    vmovaps 48(%rdi), %xmm7
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm14 = xmm1[0],xmm7[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm14[0,1,2,3],ymm9[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm8[1],ymm3[1],ymm8[3],ymm3[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm7[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm6[1],ymm5[1],ymm6[3],ymm5[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm0[0],ymm13[2],ymm0[2]
+; AVX1-NEXT:    vmovaps 64(%rdi), %xmm2
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm4[0],xmm2[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm6[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, 352(%rdi), %ymm0, %ymm1
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX1-NEXT:    vmovaps 256(%rdi), %xmm3
+; AVX1-NEXT:    vmovaps 208(%rdi), %xmm5
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm5[0],xmm3[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX1-NEXT:    vmovaps 160(%rdi), %ymm7
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm13[1],ymm0[1],ymm13[3],ymm0[3]
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm13
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm4[1],xmm2[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm5[1],xmm3[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, 128(%rdi), %ymm0, %ymm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm2[0],ymm7[0],ymm2[2],ymm7[2]
+; AVX1-NEXT:    vmovaps 80(%rdi), %xmm4
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm13[0],xmm4[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, 320(%rdi), %ymm0, %ymm5
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm5[0],ymm10[0],ymm5[2],ymm10[2]
+; AVX1-NEXT:    vmovaps 272(%rdi), %xmm1
+; AVX1-NEXT:    vmovaps 224(%rdi), %xmm0
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm14 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm7[1],ymm2[3],ymm7[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm13[1],xmm4[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm10[1],ymm5[3],ymm10[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm1, (%rsi)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm1, 32(%rsi)
+; AVX1-NEXT:    vmovaps %ymm15, 32(%rdx)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm1, (%rdx)
+; AVX1-NEXT:    vmovaps %ymm9, 32(%rcx)
+; AVX1-NEXT:    vmovaps %ymm11, (%rcx)
+; AVX1-NEXT:    vmovaps %ymm6, 32(%r8)
+; AVX1-NEXT:    vmovaps %ymm8, (%r8)
+; AVX1-NEXT:    vmovaps %ymm12, 32(%r9)
+; AVX1-NEXT:    vmovaps %ymm3, (%r9)
+; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    vmovaps %ymm0, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm2, (%rax)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: load_i64_stride6_vf8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovaps 352(%rdi), %ymm13
+; AVX2-NEXT:    vmovaps 128(%rdi), %ymm4
+; AVX2-NEXT:    vmovaps 96(%rdi), %ymm9
+; AVX2-NEXT:    vmovaps 320(%rdi), %ymm2
+; AVX2-NEXT:    vmovaps 288(%rdi), %ymm7
+; AVX2-NEXT:    vmovaps 240(%rdi), %xmm6
+; AVX2-NEXT:    vmovaps 192(%rdi), %xmm1
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm1[0],xmm6[0]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm7[0],ymm2[0],ymm7[2],ymm2[2]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,1,0,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vmovaps (%rdi), %xmm5
+; AVX2-NEXT:    vmovaps 16(%rdi), %xmm3
+; AVX2-NEXT:    vmovaps 48(%rdi), %xmm0
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm5[0],xmm0[0]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm4[0],ymm9[2],ymm4[2]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 104(%rdi), %ymm8
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm8[1],ymm4[1],ymm8[3],ymm4[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm5[1],xmm0[1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 296(%rdi), %ymm0
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm15 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 160(%rdi), %ymm0
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm9[0],ymm0[0],ymm9[2],ymm0[2]
+; AVX2-NEXT:    vmovaps 64(%rdi), %xmm1
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm3[0],xmm1[0]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 352(%rdi), %ymm0
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[2],ymm0[2]
+; AVX2-NEXT:    vmovaps 256(%rdi), %xmm5
+; AVX2-NEXT:    vmovaps 208(%rdi), %xmm6
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm10 = xmm6[0],xmm5[0]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vmovaps 160(%rdi), %ymm0
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm1[1]
+; AVX2-NEXT:    vmovaps 32(%rdi), %xmm3
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm9 = ymm9[1],ymm0[1],ymm9[3],ymm0[3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm5[1]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm7[1],ymm13[1],ymm7[3],ymm13[3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-NEXT:    vmovaps 80(%rdi), %xmm5
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm3[0],xmm5[0]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[2],ymm0[2]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-NEXT:    vmovaps 272(%rdi), %xmm6
+; AVX2-NEXT:    vmovaps 224(%rdi), %xmm7
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm13[0],ymm2[2],ymm13[2]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm11 = xmm7[0],xmm6[0]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 136(%rdi), %ymm11
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm11[1],ymm0[1],ymm11[3],ymm0[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm5[1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vbroadcastsd 328(%rdi), %ymm3
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm13[1],ymm3[3],ymm13[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm6[1]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-NEXT:    vmovaps %ymm12, (%rsi)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm5, 32(%rsi)
+; AVX2-NEXT:    vmovaps %ymm15, 32(%rdx)
+; AVX2-NEXT:    vmovaps %ymm14, (%rdx)
+; AVX2-NEXT:    vmovaps %ymm10, 32(%rcx)
+; AVX2-NEXT:    vmovaps %ymm8, (%rcx)
+; AVX2-NEXT:    vmovaps %ymm1, 32(%r8)
+; AVX2-NEXT:    vmovaps %ymm9, (%r8)
+; AVX2-NEXT:    vmovaps %ymm2, 32(%r9)
+; AVX2-NEXT:    vmovaps %ymm4, (%r9)
+; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    vmovaps %ymm3, 32(%rax)
+; AVX2-NEXT:    vmovaps %ymm0, (%rax)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: load_i64_stride6_vf8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    vmovdqu64 320(%rdi), %zmm5
+; AVX512-NEXT:    vmovdqu64 256(%rdi), %zmm6
+; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm0
+; AVX512-NEXT:    vmovdqu64 64(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqu64 128(%rdi), %zmm2
+; AVX512-NEXT:    vmovdqu64 192(%rdi), %zmm3
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,6,0,10,0,6,0,10]
+; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm7
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm4 = <0,6,12,u>
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
+; AVX512-NEXT:    movb $56, %dil
+; AVX512-NEXT:    kmovd %edi, %k1
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm4 {%k1}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [4,10,4,10,4,10,4,10]
+; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [0,0,6,12,0,0,6,12]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm5, %zmm6, %zmm9
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,1,7,13,0,1,7,13]
+; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm5, %zmm6, %zmm10
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [0,10,0,6,0,10,0,6]
+; AVX512-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm11
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [0,11,1,7,0,11,1,7]
+; AVX512-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm12
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm7, %zmm6
+; AVX512-NEXT:    movb $-64, %dil
+; AVX512-NEXT:    kmovd %edi, %k2
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm4 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [1,7,0,11,1,7,0,11]
+; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm13 = <1,7,13,u>
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm13
+; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k1}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [5,11,5,11,5,11,5,11]
+; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
+; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm13 {%k2}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [10,4,10,4,10,4,10,4]
+; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm8 = <10,0,6,u>
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm8
+; AVX512-NEXT:    movb $24, %dil
+; AVX512-NEXT:    kmovd %edi, %k1
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm8 {%k1}
+; AVX512-NEXT:    movb $-32, %dil
+; AVX512-NEXT:    kmovd %edi, %k2
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm8 {%k2}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm5 = [11,5,11,5,11,5,11,5]
+; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm9 = <11,1,7,u>
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
+; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm9 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [12,0,0,6,12,0,0,6]
+; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm5
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm7
+; AVX512-NEXT:    vinserti32x4 $0, %xmm7, %zmm5, %zmm5
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [13,0,1,7,13,0,1,7]
+; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
+; AVX512-NEXT:    vpermt2q %zmm1, %zmm6, %zmm0
+; AVX512-NEXT:    vinserti32x4 $0, %xmm0, %zmm7, %zmm0
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
+; AVX512-NEXT:    vmovdqu64 %zmm4, (%rsi)
+; AVX512-NEXT:    vmovdqu64 %zmm13, (%rdx)
+; AVX512-NEXT:    vmovdqu64 %zmm8, (%rcx)
+; AVX512-NEXT:    vmovdqu64 %zmm9, (%r8)
+; AVX512-NEXT:    vmovdqu64 %zmm5, (%r9)
+; AVX512-NEXT:    vmovdqu64 %zmm0, (%rax)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %wide.vec = load <48 x i64>, <48 x i64>* %in.vec, align 32
+
+  %strided.vec0 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42>
+  %strided.vec1 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <8 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43>
+  %strided.vec2 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <8 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44>
+  %strided.vec3 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <8 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45>
+  %strided.vec4 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <8 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46>
+  %strided.vec5 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <8 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47>
+
+  store <8 x i64> %strided.vec0, <8 x i64>* %out.vec0, align 32
+  store <8 x i64> %strided.vec1, <8 x i64>* %out.vec1, align 32
+  store <8 x i64> %strided.vec2, <8 x i64>* %out.vec2, align 32
+  store <8 x i64> %strided.vec3, <8 x i64>* %out.vec3, align 32
+  store <8 x i64> %strided.vec4, <8 x i64>* %out.vec4, align 32
+  store <8 x i64> %strided.vec5, <8 x i64>* %out.vec5, align 32
+
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
new file mode 100644
index 000000000000..29e10c47383b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
@@ -0,0 +1,675 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx  | FileCheck %s --check-prefixes=AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512
+
+; These patterns are produced by LoopVectorizer for interleaved stores.
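+;
+; For illustration only (an assumption, not part of this commit): a scalar
+; source loop of roughly the following shape is the kind of input whose
+; interleaved-access vectorization yields the concatenating shufflevectors
+; followed by one wide stride-6 store exercised below.
+;
+;   // hypothetical C source
+;   struct S { long long f0, f1, f2, f3, f4, f5; }; // six i64 fields => stride 6
+;   void interleave(struct S *out, long long *i0, long long *i1, long long *i2,
+;                   long long *i3, long long *i4, long long *i5, int n) {
+;     for (int i = 0; i < n; ++i) {
+;       out[i].f0 = i0[i]; out[i].f1 = i1[i]; out[i].f2 = i2[i];
+;       out[i].f3 = i3[i]; out[i].f4 = i4[i]; out[i].f5 = i5[i];
+;     }
+;   }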
+
+define void @store_i64_stride6_vf2(<2 x i64>* %in.vecptr0, <2 x i64>* %in.vecptr1, <2 x i64>* %in.vecptr2, <2 x i64>* %in.vecptr3, <2 x i64>* %in.vecptr4, <2 x i64>* %in.vecptr5, <12 x i64>* %out.vec) nounwind {
+; SSE-LABEL: store_i64_stride6_vf2:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT:    movaps (%rdi), %xmm0
+; SSE-NEXT:    movaps (%rsi), %xmm8
+; SSE-NEXT:    movaps (%rdx), %xmm2
+; SSE-NEXT:    movaps (%rcx), %xmm3
+; SSE-NEXT:    movaps (%r8), %xmm4
+; SSE-NEXT:    movaps (%r9), %xmm5
+; SSE-NEXT:    movaps %xmm0, %xmm6
+; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm8[0]
+; SSE-NEXT:    movaps %xmm4, %xmm7
+; SSE-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm5[0]
+; SSE-NEXT:    movaps %xmm2, %xmm1
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm5[1]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1]
+; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT:    movaps %xmm2, 16(%rax)
+; SSE-NEXT:    movaps %xmm0, 48(%rax)
+; SSE-NEXT:    movaps %xmm4, 80(%rax)
+; SSE-NEXT:    movaps %xmm1, 64(%rax)
+; SSE-NEXT:    movaps %xmm7, 32(%rax)
+; SSE-NEXT:    movaps %xmm6, (%rax)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: store_i64_stride6_vf2:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    vmovaps (%rdi), %xmm0
+; AVX1-NEXT:    vmovaps (%rsi), %xmm1
+; AVX1-NEXT:    vmovaps (%rdx), %xmm2
+; AVX1-NEXT:    vmovaps (%rcx), %xmm3
+; AVX1-NEXT:    vmovaps (%r8), %xmm4
+; AVX1-NEXT:    vmovaps (%r9), %xmm5
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm6
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm7
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[2],ymm6[2]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm5, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX1-NEXT:    vmovaps %ymm1, 64(%rax)
+; AVX1-NEXT:    vmovapd %ymm0, 32(%rax)
+; AVX1-NEXT:    vmovaps %ymm6, (%rax)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: store_i64_stride6_vf2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    vmovaps (%rdi), %xmm0
+; AVX2-NEXT:    vmovaps (%rdx), %xmm1
+; AVX2-NEXT:    vmovaps (%r8), %xmm2
+; AVX2-NEXT:    vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vinsertf128 $1, (%rcx), %ymm1, %ymm1
+; AVX2-NEXT:    vinsertf128 $1, (%r9), %ymm2, %ymm2
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,1,3]
+; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT:    vmovaps %ymm1, 64(%rax)
+; AVX2-NEXT:    vmovaps %ymm0, 32(%rax)
+; AVX2-NEXT:    vmovaps %ymm3, (%rax)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: store_i64_stride6_vf2:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX512-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    vinserti32x4 $1, (%r9), %zmm2, %zmm1
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [5,7,9,11]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,2,4,6,8,10,1,3]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
+; AVX512-NEXT:    vmovdqu64 %zmm3, (%rax)
+; AVX512-NEXT:    vmovdqa %ymm2, 64(%rax)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %in.vec0 = load <2 x i64>, <2 x i64>* %in.vecptr0, align 32
+  %in.vec1 = load <2 x i64>, <2 x i64>* %in.vecptr1, align 32
+  %in.vec2 = load <2 x i64>, <2 x i64>* %in.vecptr2, align 32
+  %in.vec3 = load <2 x i64>, <2 x i64>* %in.vecptr3, align 32
+  %in.vec4 = load <2 x i64>, <2 x i64>* %in.vecptr4, align 32
+  %in.vec5 = load <2 x i64>, <2 x i64>* %in.vecptr5, align 32
+
+  %concat01 = shufflevector <2 x i64> %in.vec0, <2 x i64> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %concat23 = shufflevector <2 x i64> %in.vec2, <2 x i64> %in.vec3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %concat45 = shufflevector <2 x i64> %in.vec4, <2 x i64> %in.vec5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %concat0123 = shufflevector <4 x i64> %concat01, <4 x i64> %concat23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %concat45uu = shufflevector <4 x i64> %concat45, <4 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+  %concat012345 = shufflevector <8 x i64> %concat0123, <8 x i64> %concat45uu, <12 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
+  %interleaved.vec = shufflevector <12 x i64> %concat012345, <12 x i64> poison, <12 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11>
+
+  store <12 x i64> %interleaved.vec, <12 x i64>* %out.vec, align 32
+
+  ret void
+}
+
+define void @store_i64_stride6_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr1, <4 x i64>* %in.vecptr2, <4 x i64>* %in.vecptr3, <4 x i64>* %in.vecptr4, <4 x i64>* %in.vecptr5, <24 x i64>* %out.vec) nounwind {
+; SSE-LABEL: store_i64_stride6_vf4:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT:    movaps (%rdi), %xmm0
+; SSE-NEXT:    movaps 16(%rdi), %xmm15
+; SSE-NEXT:    movaps (%rsi), %xmm8
+; SSE-NEXT:    movaps 16(%rsi), %xmm10
+; SSE-NEXT:    movaps (%rdx), %xmm6
+; SSE-NEXT:    movaps 16(%rdx), %xmm1
+; SSE-NEXT:    movaps (%rcx), %xmm9
+; SSE-NEXT:    movaps 16(%rcx), %xmm5
+; SSE-NEXT:    movaps (%r8), %xmm7
+; SSE-NEXT:    movaps 16(%r8), %xmm4
+; SSE-NEXT:    movaps (%r9), %xmm11
+; SSE-NEXT:    movaps 16(%r9), %xmm3
+; SSE-NEXT:    movaps %xmm4, %xmm12
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm3[1]
+; SSE-NEXT:    movaps %xmm1, %xmm13
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm5[1]
+; SSE-NEXT:    movaps %xmm15, %xmm14
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm10[1]
+; SSE-NEXT:    movlhps {{.*#+}} xmm4 = xmm4[0],xmm3[0]
+; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE-NEXT:    movlhps {{.*#+}} xmm15 = xmm15[0],xmm10[0]
+; SSE-NEXT:    movaps %xmm7, %xmm3
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm11[1]
+; SSE-NEXT:    movaps %xmm6, %xmm5
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm9[1]
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm8[1]
+; SSE-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm11[0]
+; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm9[0]
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm8[0]
+; SSE-NEXT:    movaps %xmm0, (%rax)
+; SSE-NEXT:    movaps %xmm6, 16(%rax)
+; SSE-NEXT:    movaps %xmm7, 32(%rax)
+; SSE-NEXT:    movaps %xmm2, 48(%rax)
+; SSE-NEXT:    movaps %xmm5, 64(%rax)
+; SSE-NEXT:    movaps %xmm3, 80(%rax)
+; SSE-NEXT:    movaps %xmm15, 96(%rax)
+; SSE-NEXT:    movaps %xmm1, 112(%rax)
+; SSE-NEXT:    movaps %xmm4, 128(%rax)
+; SSE-NEXT:    movaps %xmm14, 144(%rax)
+; SSE-NEXT:    movaps %xmm13, 160(%rax)
+; SSE-NEXT:    movaps %xmm12, 176(%rax)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: store_i64_stride6_vf4:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    vmovaps (%rdi), %ymm0
+; AVX1-NEXT:    vmovaps (%rdx), %ymm1
+; AVX1-NEXT:    vmovaps (%r8), %ymm2
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm3
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX1-NEXT:    vmovaps 16(%rdx), %xmm3
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],mem[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm9 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX1-NEXT:    vmovaps 16(%r8), %xmm3
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovaps (%rcx), %xmm3
+; AVX1-NEXT:    vmovaps (%rdx), %xmm4
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm4[1],xmm3[1]
+; AVX1-NEXT:    vmovaps (%r9), %xmm6
+; AVX1-NEXT:    vmovaps (%r8), %xmm7
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0]
+; AVX1-NEXT:    vmovaps (%rsi), %xmm7
+; AVX1-NEXT:    vmovaps (%rdi), %xmm2
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm7[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX1-NEXT:    vmovaps %xmm3, 16(%rax)
+; AVX1-NEXT:    vmovaps %xmm2, (%rax)
+; AVX1-NEXT:    vmovaps %xmm0, 48(%rax)
+; AVX1-NEXT:    vmovaps %xmm6, 32(%rax)
+; AVX1-NEXT:    vmovaps %xmm1, 80(%rax)
+; AVX1-NEXT:    vmovaps %xmm5, 64(%rax)
+; AVX1-NEXT:    vmovaps %ymm10, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm9, 160(%rax)
+; AVX1-NEXT:    vmovaps %ymm8, 96(%rax)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: store_i64_stride6_vf4:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-NEXT:    vmovaps (%rsi), %ymm1
+; AVX2-NEXT:    vmovaps (%rdx), %ymm2
+; AVX2-NEXT:    vmovaps (%rcx), %ymm3
+; AVX2-NEXT:    vmovaps (%r8), %ymm4
+; AVX2-NEXT:    vmovaps (%r9), %ymm5
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[2,3],ymm6[2,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm2[2,3],ymm7[2,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm1[2,3],ymm0[2,3]
+; AVX2-NEXT:    vmovaps (%rcx), %xmm1
+; AVX2-NEXT:    vmovaps (%rdx), %xmm3
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
+; AVX2-NEXT:    vmovaps (%r9), %xmm5
+; AVX2-NEXT:    vmovaps (%r8), %xmm7
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm5[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX2-NEXT:    vmovaps (%rsi), %xmm7
+; AVX2-NEXT:    vmovaps (%rdi), %xmm2
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm7[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX2-NEXT:    vmovaps %xmm1, 16(%rax)
+; AVX2-NEXT:    vmovaps %xmm2, (%rax)
+; AVX2-NEXT:    vmovaps %xmm0, 48(%rax)
+; AVX2-NEXT:    vmovaps %xmm5, 32(%rax)
+; AVX2-NEXT:    vmovaps %xmm6, 80(%rax)
+; AVX2-NEXT:    vmovaps %xmm4, 64(%rax)
+; AVX2-NEXT:    vmovaps %ymm10, 128(%rax)
+; AVX2-NEXT:    vmovaps %ymm9, 160(%rax)
+; AVX2-NEXT:    vmovaps %ymm8, 96(%rax)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: store_i64_stride6_vf4:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512-NEXT:    vmovdqa (%r8), %ymm2
+; AVX512-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
+; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,4,8,12,u,u,1,5>
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm3
+; AVX512-NEXT:    vinserti64x4 $1, (%r9), %zmm2, %zmm2
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,8,12,6,7]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm4
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <1,5,9,13,u,u,2,6>
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,10,14,6,7]
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <2,6,11,15,u,u,3,7>
+; AVX512-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,11,15,6,7]
+; AVX512-NEXT:    vpermi2q %zmm1, %zmm3, %zmm0
+; AVX512-NEXT:    vmovdqu64 %zmm0, 128(%rax)
+; AVX512-NEXT:    vmovdqu64 %zmm5, 64(%rax)
+; AVX512-NEXT:    vmovdqu64 %zmm4, (%rax)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %in.vec0 = load <4 x i64>, <4 x i64>* %in.vecptr0, align 32
+  %in.vec1 = load <4 x i64>, <4 x i64>* %in.vecptr1, align 32
+  %in.vec2 = load <4 x i64>, <4 x i64>* %in.vecptr2, align 32
+  %in.vec3 = load <4 x i64>, <4 x i64>* %in.vecptr3, align 32
+  %in.vec4 = load <4 x i64>, <4 x i64>* %in.vecptr4, align 32
+  %in.vec5 = load <4 x i64>, <4 x i64>* %in.vecptr5, align 32
+
+  %concat01 = shufflevector <4 x i64> %in.vec0, <4 x i64> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %concat23 = shufflevector <4 x i64> %in.vec2, <4 x i64> %in.vec3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %concat45 = shufflevector <4 x i64> %in.vec4, <4 x i64> %in.vec5, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %concat0123 = shufflevector <8 x i64> %concat01, <8 x i64> %concat23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %concat45uu = shufflevector <8 x i64> %concat45, <8 x i64> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %concat012345 = shufflevector <16 x i64> %concat0123, <16 x i64> %concat45uu, <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  %interleaved.vec = shufflevector <24 x i64> %concat012345, <24 x i64> poison, <24 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23>
+
+  store <24 x i64> %interleaved.vec, <24 x i64>* %out.vec, align 32
+
+  ret void
+}
+
+define void @store_i64_stride6_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr1, <8 x i64>* %in.vecptr2, <8 x i64>* %in.vecptr3, <8 x i64>* %in.vecptr4, <8 x i64>* %in.vecptr5, <48 x i64>* %out.vec) nounwind {
+; SSE-LABEL: store_i64_stride6_vf8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    subq $24, %rsp
+; SSE-NEXT:    movaps (%rdi), %xmm7
+; SSE-NEXT:    movaps 16(%rdi), %xmm10
+; SSE-NEXT:    movaps 32(%rdi), %xmm15
+; SSE-NEXT:    movaps (%rsi), %xmm11
+; SSE-NEXT:    movaps 16(%rsi), %xmm4
+; SSE-NEXT:    movaps 32(%rsi), %xmm8
+; SSE-NEXT:    movaps (%rdx), %xmm12
+; SSE-NEXT:    movaps 16(%rdx), %xmm9
+; SSE-NEXT:    movaps 32(%rdx), %xmm14
+; SSE-NEXT:    movaps (%rcx), %xmm3
+; SSE-NEXT:    movaps 16(%rcx), %xmm6
+; SSE-NEXT:    movaps (%r8), %xmm5
+; SSE-NEXT:    movaps 16(%r8), %xmm13
+; SSE-NEXT:    movaps (%r9), %xmm0
+; SSE-NEXT:    movaps 16(%r9), %xmm1
+; SSE-NEXT:    movaps %xmm7, %xmm2
+; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm11[0]
+; SSE-NEXT:    movaps %xmm2, (%rsp) # 16-byte Spill
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm11[1]
+; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm12, %xmm7
+; SSE-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm3[0]
+; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm3[1]
+; SSE-NEXT:    movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm5, %xmm3
+; SSE-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; SSE-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
+; SSE-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm10, %xmm11
+; SSE-NEXT:    movlhps {{.*#+}} xmm11 = xmm11[0],xmm4[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm4[1]
+; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm9, %xmm0
+; SSE-NEXT:    movlhps {{.*#+}} xmm9 = xmm9[0],xmm6[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1]
+; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm13, %xmm10
+; SSE-NEXT:    movlhps {{.*#+}} xmm10 = xmm10[0],xmm1[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm1[1]
+; SSE-NEXT:    movaps %xmm15, %xmm0
+; SSE-NEXT:    movlhps {{.*#+}} xmm15 = xmm15[0],xmm8[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm8[1]
+; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps 32(%rcx), %xmm1
+; SSE-NEXT:    movaps %xmm14, %xmm12
+; SSE-NEXT:    movlhps {{.*#+}} xmm12 = xmm12[0],xmm1[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm1[1]
+; SSE-NEXT:    movaps 32(%r8), %xmm5
+; SSE-NEXT:    movaps 32(%r9), %xmm0
+; SSE-NEXT:    movaps %xmm5, %xmm8
+; SSE-NEXT:    movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
+; SSE-NEXT:    movaps 48(%rdi), %xmm6
+; SSE-NEXT:    movaps 48(%rsi), %xmm3
+; SSE-NEXT:    movaps %xmm6, %xmm7
+; SSE-NEXT:    movlhps {{.*#+}} xmm7 = xmm7[0],xmm3[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm3[1]
+; SSE-NEXT:    movaps 48(%rdx), %xmm3
+; SSE-NEXT:    movaps 48(%rcx), %xmm2
+; SSE-NEXT:    movaps %xmm3, %xmm1
+; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE-NEXT:    movaps 48(%r8), %xmm2
+; SSE-NEXT:    movaps 48(%r9), %xmm4
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
+; SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT:    movaps %xmm2, 368(%rax)
+; SSE-NEXT:    movaps %xmm3, 352(%rax)
+; SSE-NEXT:    movaps %xmm6, 336(%rax)
+; SSE-NEXT:    movaps %xmm0, 320(%rax)
+; SSE-NEXT:    movaps %xmm1, 304(%rax)
+; SSE-NEXT:    movaps %xmm7, 288(%rax)
+; SSE-NEXT:    movaps %xmm5, 272(%rax)
+; SSE-NEXT:    movaps %xmm14, 256(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 240(%rax)
+; SSE-NEXT:    movaps %xmm8, 224(%rax)
+; SSE-NEXT:    movaps %xmm12, 208(%rax)
+; SSE-NEXT:    movaps %xmm15, 192(%rax)
+; SSE-NEXT:    movaps %xmm13, 176(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 160(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 144(%rax)
+; SSE-NEXT:    movaps %xmm10, 128(%rax)
+; SSE-NEXT:    movaps %xmm9, 112(%rax)
+; SSE-NEXT:    movaps %xmm11, 96(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 80(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 64(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 48(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 32(%rax)
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, 16(%rax)
+; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps %xmm0, (%rax)
+; SSE-NEXT:    addq $24, %rsp
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: store_i64_stride6_vf8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovaps (%rdi), %ymm4
+; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
+; AVX1-NEXT:    vmovaps (%rdx), %ymm5
+; AVX1-NEXT:    vmovaps 32(%rdx), %ymm2
+; AVX1-NEXT:    vmovaps (%r8), %ymm3
+; AVX1-NEXT:    vmovaps 32(%r8), %ymm0
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX1-NEXT:    vmovaps 48(%rdx), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm6[1],mem[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
+; AVX1-NEXT:    vmovaps 48(%r8), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2]
+; AVX1-NEXT:    vmovaps 48(%rdi), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX1-NEXT:    vmovaps 16(%rdx), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm6[1],mem[1]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm11 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],mem[1],ymm4[3],mem[3]
+; AVX1-NEXT:    vmovaps 16(%r8), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm12 = ymm6[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm5[0],mem[0],ymm5[2],mem[2]
+; AVX1-NEXT:    vmovaps 16(%rdi), %xmm6
+; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm13 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX1-NEXT:    vmovaps 32(%rcx), %xmm6
+; AVX1-NEXT:    vmovaps 32(%rdx), %xmm7
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm7[1],xmm6[1]
+; AVX1-NEXT:    vmovaps (%r9), %xmm1
+; AVX1-NEXT:    vmovaps 32(%r9), %xmm2
+; AVX1-NEXT:    vmovaps 32(%r8), %xmm3
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm3[1],xmm2[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm10 = xmm3[0],xmm2[0]
+; AVX1-NEXT:    vmovaps (%rsi), %xmm3
+; AVX1-NEXT:    vmovaps 32(%rsi), %xmm5
+; AVX1-NEXT:    vmovaps 32(%rdi), %xmm0
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm5[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm5[0]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm6[0]
+; AVX1-NEXT:    vmovaps (%r8), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm6[1],xmm1[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm1[0]
+; AVX1-NEXT:    vmovaps (%rdi), %xmm6
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm3[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm6[0],xmm3[0]
+; AVX1-NEXT:    vmovaps (%rcx), %xmm6
+; AVX1-NEXT:    vmovaps (%rdx), %xmm0
+; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm6[1]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    vmovaps %xmm0, 16(%rax)
+; AVX1-NEXT:    vmovaps %xmm3, (%rax)
+; AVX1-NEXT:    vmovaps %xmm1, 48(%rax)
+; AVX1-NEXT:    vmovaps %xmm4, 32(%rax)
+; AVX1-NEXT:    vmovaps %xmm7, 80(%rax)
+; AVX1-NEXT:    vmovaps %xmm2, 64(%rax)
+; AVX1-NEXT:    vmovaps %xmm5, 208(%rax)
+; AVX1-NEXT:    vmovaps %xmm8, 192(%rax)
+; AVX1-NEXT:    vmovaps %xmm9, 240(%rax)
+; AVX1-NEXT:    vmovaps %xmm10, 224(%rax)
+; AVX1-NEXT:    vmovaps %xmm15, 272(%rax)
+; AVX1-NEXT:    vmovaps %xmm14, 256(%rax)
+; AVX1-NEXT:    vmovaps %ymm13, 96(%rax)
+; AVX1-NEXT:    vmovaps %ymm12, 128(%rax)
+; AVX1-NEXT:    vmovaps %ymm11, 160(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 288(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 320(%rax)
+; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-NEXT:    vmovaps %ymm0, 352(%rax)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: store_i64_stride6_vf8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovaps (%rdi), %ymm5
+; AVX2-NEXT:    vmovaps 32(%rdi), %ymm2
+; AVX2-NEXT:    vmovaps (%rsi), %ymm6
+; AVX2-NEXT:    vmovaps 32(%rsi), %ymm3
+; AVX2-NEXT:    vmovaps (%rdx), %ymm7
+; AVX2-NEXT:    vmovaps 32(%rdx), %ymm4
+; AVX2-NEXT:    vmovaps (%rcx), %ymm8
+; AVX2-NEXT:    vmovaps 32(%rcx), %ymm9
+; AVX2-NEXT:    vmovaps (%r8), %ymm10
+; AVX2-NEXT:    vmovaps 32(%r8), %ymm1
+; AVX2-NEXT:    vmovaps (%r9), %ymm11
+; AVX2-NEXT:    vmovaps 32(%r9), %ymm12
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm12[1],ymm1[3],ymm12[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm4[1],ymm9[1],ymm4[3],ymm9[3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
+; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[2],ymm12[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm13[2,3]
+; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm9[0],ymm4[2],ymm9[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm2[2,3],ymm4[2,3]
+; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm8[1],ymm7[3],ymm8[3]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm4[2,3],ymm3[2,3]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm6[1],ymm5[3],ymm6[3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm9[2,3],ymm4[2,3]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
+; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
+; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm5[2,3],ymm7[2,3]
+; AVX2-NEXT:    vmovaps 32(%rcx), %xmm6
+; AVX2-NEXT:    vmovaps 32(%rdx), %xmm7
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm6[1]
+; AVX2-NEXT:    vmovaps (%r9), %xmm1
+; AVX2-NEXT:    vmovaps 32(%r9), %xmm2
+; AVX2-NEXT:    vmovaps (%r8), %xmm3
+; AVX2-NEXT:    vmovaps 32(%r8), %xmm4
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm4[1],xmm2[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm10 = xmm4[0],xmm2[0]
+; AVX2-NEXT:    vmovaps 32(%rsi), %xmm4
+; AVX2-NEXT:    vmovaps 32(%rdi), %xmm0
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm4[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm4[0]
+; AVX2-NEXT:    vmovaps (%rsi), %xmm4
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0]
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm3[1],xmm1[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm3[0],xmm1[0]
+; AVX2-NEXT:    vmovaps (%rdi), %xmm3
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX2-NEXT:    vmovaps (%rcx), %xmm4
+; AVX2-NEXT:    vmovaps (%rdx), %xmm0
+; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm4[1]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    vmovaps %xmm0, 16(%rax)
+; AVX2-NEXT:    vmovaps %xmm3, (%rax)
+; AVX2-NEXT:    vmovaps %xmm1, 48(%rax)
+; AVX2-NEXT:    vmovaps %xmm5, 32(%rax)
+; AVX2-NEXT:    vmovaps %xmm7, 80(%rax)
+; AVX2-NEXT:    vmovaps %xmm2, 64(%rax)
+; AVX2-NEXT:    vmovaps %xmm6, 208(%rax)
+; AVX2-NEXT:    vmovaps %xmm8, 192(%rax)
+; AVX2-NEXT:    vmovaps %xmm9, 240(%rax)
+; AVX2-NEXT:    vmovaps %xmm10, 224(%rax)
+; AVX2-NEXT:    vmovaps %xmm11, 272(%rax)
+; AVX2-NEXT:    vmovaps %xmm12, 256(%rax)
+; AVX2-NEXT:    vmovaps %ymm13, 96(%rax)
+; AVX2-NEXT:    vmovaps %ymm14, 128(%rax)
+; AVX2-NEXT:    vmovaps %ymm15, 160(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 288(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 320(%rax)
+; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-NEXT:    vmovaps %ymm0, 352(%rax)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: store_i64_stride6_vf8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm5
+; AVX512-NEXT:    vmovdqu64 (%rsi), %zmm6
+; AVX512-NEXT:    vmovdqu64 (%rdx), %zmm3
+; AVX512-NEXT:    vmovdqu64 (%rcx), %zmm4
+; AVX512-NEXT:    vmovdqu64 (%r8), %zmm8
+; AVX512-NEXT:    vmovdqu64 (%r9), %zmm2
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [1,9,2,10,1,9,2,10]
+; AVX512-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm0
+; AVX512-NEXT:    vmovdqa (%r8), %xmm7
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm7 = xmm7[1],mem[1]
+; AVX512-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512-NEXT:    movb $12, %al
+; AVX512-NEXT:    kmovd %eax, %k1
+; AVX512-NEXT:    vinserti64x4 $0, %ymm7, %zmm0, %zmm0 {%k1}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [2,10,2,10,2,10,2,10]
+; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm7
+; AVX512-NEXT:    movb $48, %al
+; AVX512-NEXT:    kmovd %eax, %k2
+; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm0 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [0,8,1,9,0,8,1,9]
+; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm7
+; AVX512-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k1}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [0,8,0,8,0,8,0,8]
+; AVX512-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm8, %zmm1
+; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm7 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [2,10,3,11,2,10,3,11]
+; AVX512-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm8, %zmm1
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm9
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
+; AVX512-NEXT:    vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [3,11,3,11,3,11,3,11]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm9
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm1 {%k2}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [7,15,7,15,7,15,7,15]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm9
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [6,14,7,15,6,14,7,15]
+; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm8, %zmm10
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,7,15>
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm11
+; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm10 {%k1}
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm10 {%k2}
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm9
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [5,13,6,14,5,13,6,14]
+; AVX512-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm11
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,5,13>
+; AVX512-NEXT:    vpermi2q %zmm2, %zmm8, %zmm12
+; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm11 {%k1}
+; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm11 {%k2}
+; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [4,12,5,13,4,12,5,13]
+; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vpermi2q %zmm6, %zmm5, %zmm9
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,4,12>
+; AVX512-NEXT:    vpermi2q %zmm4, %zmm3, %zmm5
+; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k2} = zmm8[0],zmm2[0],zmm8[2],zmm2[2],zmm8[4],zmm2[4],zmm8[6],zmm2[6]
+; AVX512-NEXT:    vmovdqu64 %zmm11, 256(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm10, 320(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm1, 128(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm9, 192(%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm7, (%r10)
+; AVX512-NEXT:    vmovdqu64 %zmm0, 64(%r10)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %in.vec0 = load <8 x i64>, <8 x i64>* %in.vecptr0, align 32
+  %in.vec1 = load <8 x i64>, <8 x i64>* %in.vecptr1, align 32
+  %in.vec2 = load <8 x i64>, <8 x i64>* %in.vecptr2, align 32
+  %in.vec3 = load <8 x i64>, <8 x i64>* %in.vecptr3, align 32
+  %in.vec4 = load <8 x i64>, <8 x i64>* %in.vecptr4, align 32
+  %in.vec5 = load <8 x i64>, <8 x i64>* %in.vecptr5, align 32
+
+  %concat01 = shufflevector <8 x i64> %in.vec0, <8 x i64> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %concat23 = shufflevector <8 x i64> %in.vec2, <8 x i64> %in.vec3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %concat45 = shufflevector <8 x i64> %in.vec4, <8 x i64> %in.vec5, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %concat0123 = shufflevector <16 x i64> %concat01, <16 x i64> %concat23, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %concat45uu = shufflevector <16 x i64> %concat45, <16 x i64> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %concat012345 = shufflevector <32 x i64> %concat0123, <32 x i64> %concat45uu, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
+  %interleaved.vec = shufflevector <48 x i64> %concat012345, <48 x i64> poison, <48 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 7, i32 15, i32 23, i32 31, i32 39, i32 47>
+
+  store <48 x i64> %interleaved.vec, <48 x i64>* %out.vec, align 32
+
+  ret void
+}
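
For readers unfamiliar with the pattern: the IR above is the vf8 slice of an interleaved store group with stride 6, built by concatenating the six input vectors and shuffling them into memory order. A minimal scalar C sketch of the kind of source loop that gives rise to such a group follows; the function name, signature, and parameter names are illustrative only and are not part of the test.

    void store_i64_stride6(long long *out,
                           const long long *a, const long long *b,
                           const long long *c, const long long *d,
                           const long long *e, const long long *f,
                           long long n) {
      for (long long i = 0; i < n; ++i) {
        /* Six independent streams written to one buffer with stride 6;
           a vectorizer turns groups of these stores into the
           shufflevector-based interleaving checked above. */
        out[6 * i + 0] = a[i];
        out[6 * i + 1] = b[i];
        out[6 * i + 2] = c[i];
        out[6 * i + 3] = d[i];
        out[6 * i + 4] = e[i];
        out[6 * i + 5] = f[i];
      }
    }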


        

