[llvm] d7f3775 - [X86] combineEXTRACT_SUBVECTOR - fold extract_subvector(pshufd(v,i)) -> pshufd(extract_subvector(v,i))
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 16 10:37:17 PST 2024
Author: Simon Pilgrim
Date: 2024-12-16T18:32:12Z
New Revision: d7f3775977875a8208e494bab822b9cdef991822
URL: https://github.com/llvm/llvm-project/commit/d7f3775977875a8208e494bab822b9cdef991822
DIFF: https://github.com/llvm/llvm-project/commit/d7f3775977875a8208e494bab822b9cdef991822.diff
LOG: [X86] combineEXTRACT_SUBVECTOR - fold extract_subvector(pshufd(v,i)) -> pshufd(extract_subvector(v,i))
Attempt to avoid unnecessary wide PSHUFD or VPERMILPS/VPERMILPD instructions by pre-extracting the subvector source if the shuffle is its only use.
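The mask handling follows the immediate encodings: PSHUFD and VPERMILPS take a per-128-bit-lane immediate that repeats across lanes, so the extracted subvector reuses the 8-bit mask unchanged, while VPERMILPD encodes one mask bit per element across the whole vector, so the mask is shifted right by the extraction index (the scalar-size check in the patch below). The resulting codegen change, sketched here from the load_i32_stride7_vf2 AVX512 diff further down:

  Before (wide 256-bit shuffle, then extract the upper half):
    vpshufd      ymm7 = ymm7[1,0,2,3,5,4,6,7]
    vextracti128 $1, %ymm7, %xmm7
  After (extract first, then a narrow 128-bit shuffle):
    vextracti128 $1, %ymm7, %xmm7
    vpshufd      xmm7 = xmm7[1,0,2,3]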
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/oddshuffles.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7d23176c493a23..c50db59464724c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -58339,6 +58339,17 @@ static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(
InOpcode, DL, VT,
extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits));
+ case X86ISD::PSHUFD:
+ case X86ISD::VPERMILPI:
+ if (InVec.getOperand(0).hasOneUse()) {
+ uint64_t M = InVec.getConstantOperandVal(1) & 255;
+ M = VT.getScalarSizeInBits() < 64 ? M : (M >> IdxVal);
+ return DAG.getNode(InOpcode, DL, VT,
+ extractSubVector(InVec.getOperand(0), IdxVal, DAG,
+ DL, SizeInBits),
+ DAG.getTargetConstant(M, DL, MVT::i8));
+ }
+ break;
case X86ISD::PCMPEQ:
case X86ISD::PCMPGT:
case X86ISD::UNPCKH:
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 90c1d42a929c81..8fd8e0e8120c14 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -2400,34 +2400,32 @@ define void @D107009(ptr %input, ptr %output) {
;
; AVX1-LABEL: D107009:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovups 96(%rdi), %ymm0
-; AVX1-NEXT: vmovups 128(%rdi), %ymm1
-; AVX1-NEXT: vmovups 224(%rdi), %ymm2
-; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2]
-; AVX1-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
-; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,0],ymm1[4,5],ymm2[6,4]
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
-; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
+; AVX1-NEXT: vmovups 128(%rdi), %ymm0
+; AVX1-NEXT: vmovups 224(%rdi), %ymm1
+; AVX1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
+; AVX1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,0],ymm0[4,5],ymm1[6,4]
+; AVX1-NEXT: vmovaps 112(%rdi), %xmm1
+; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-NEXT: vmovaps 16(%rdi), %xmm2
+; AVX1-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
+; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT: vshufps {{.*#+}} ymm3 = ymm1[3,3,3,3,7,7,7,7]
-; AVX1-NEXT: vshufpd {{.*#+}} ymm4 = ymm1[0,0,3,2]
-; AVX1-NEXT: vmovshdup {{.*#+}} ymm5 = ymm1[1,1,3,3,5,5,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
-; AVX1-NEXT: vmovdqa %xmm0, 16(%rsi)
+; AVX1-NEXT: vshufps {{.*#+}} ymm3 = ymm0[3,3,3,3,7,7,7,7]
+; AVX1-NEXT: vshufpd {{.*#+}} ymm4 = ymm0[0,0,3,2]
+; AVX1-NEXT: vmovshdup {{.*#+}} ymm5 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[3,3,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,1,1]
+; AVX1-NEXT: vmovdqa %xmm1, 16(%rsi)
; AVX1-NEXT: vmovdqa %xmm7, 48(%rsi)
; AVX1-NEXT: vmovdqa %xmm6, 112(%rsi)
-; AVX1-NEXT: vmovups %ymm1, 128(%rsi)
+; AVX1-NEXT: vmovups %ymm0, 128(%rsi)
; AVX1-NEXT: vmovups %ymm5, 160(%rsi)
; AVX1-NEXT: vmovupd %ymm4, 192(%rsi)
; AVX1-NEXT: vmovupd %ymm3, 224(%rsi)
@@ -2470,34 +2468,32 @@ define void @D107009(ptr %input, ptr %output) {
;
; XOP-LABEL: D107009:
; XOP: # %bb.0:
-; XOP-NEXT: vmovups 96(%rdi), %ymm0
-; XOP-NEXT: vmovups 128(%rdi), %ymm1
-; XOP-NEXT: vmovups 224(%rdi), %ymm2
-; XOP-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2]
-; XOP-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
-; XOP-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,0],ymm1[4,5],ymm2[6,4]
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm2
-; XOP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
-; XOP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
-; XOP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
+; XOP-NEXT: vmovups 128(%rdi), %ymm0
+; XOP-NEXT: vmovups 224(%rdi), %ymm1
+; XOP-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
+; XOP-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; XOP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,0],ymm0[4,5],ymm1[6,4]
+; XOP-NEXT: vmovaps 112(%rdi), %xmm1
+; XOP-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; XOP-NEXT: vmovaps 16(%rdi), %xmm2
+; XOP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
+; XOP-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
+; XOP-NEXT: vpsrld $16, %xmm1, %xmm1
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm0
-; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; XOP-NEXT: vpsrld $16, %xmm0, %xmm0
-; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
-; XOP-NEXT: vpsrld $16, %xmm1, %xmm1
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; XOP-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; XOP-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOP-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
+; XOP-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; XOP-NEXT: vshufps {{.*#+}} ymm3 = ymm1[3,3,3,3,7,7,7,7]
-; XOP-NEXT: vshufpd {{.*#+}} ymm4 = ymm1[0,0,3,2]
-; XOP-NEXT: vmovshdup {{.*#+}} ymm5 = ymm1[1,1,3,3,5,5,7,7]
-; XOP-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
-; XOP-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
-; XOP-NEXT: vmovdqa %xmm0, 16(%rsi)
+; XOP-NEXT: vshufps {{.*#+}} ymm3 = ymm0[3,3,3,3,7,7,7,7]
+; XOP-NEXT: vshufpd {{.*#+}} ymm4 = ymm0[0,0,3,2]
+; XOP-NEXT: vmovshdup {{.*#+}} ymm5 = ymm0[1,1,3,3,5,5,7,7]
+; XOP-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[3,3,3,3]
+; XOP-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,1,1]
+; XOP-NEXT: vmovdqa %xmm1, 16(%rsi)
; XOP-NEXT: vmovdqa %xmm7, 48(%rsi)
; XOP-NEXT: vmovdqa %xmm6, 112(%rsi)
-; XOP-NEXT: vmovups %ymm1, 128(%rsi)
+; XOP-NEXT: vmovups %ymm0, 128(%rsi)
; XOP-NEXT: vmovups %ymm5, 160(%rsi)
; XOP-NEXT: vmovupd %ymm4, 192(%rsi)
; XOP-NEXT: vmovupd %ymm3, 224(%rsi)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
index e03e19fc6d16fe..d806253ef23a08 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
@@ -69,11 +69,11 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3]
; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm1[0,0],ymm0[1,0],ymm1[4,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,0,2,3]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,0],ymm0[2,0],ymm1[5,4],ymm0[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0,2,3]
; AVX-NEXT: vmovlps %xmm5, (%rsi)
; AVX-NEXT: vmovlps %xmm6, (%rdx)
; AVX-NEXT: vmovlps %xmm7, (%rcx)
@@ -104,8 +104,8 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vpermps %ymm7, %ymm4, %ymm4
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
-; AVX2-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
; AVX2-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX2-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,0,2,3]
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -139,8 +139,8 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT: vpermps %ymm7, %ymm4, %ymm4
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
-; AVX2-FP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX2-FP-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,0,2,3]
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
; AVX2-FP-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -174,8 +174,8 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT: vpermps %ymm7, %ymm4, %ymm4
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
-; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
; AVX2-FCP-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX2-FCP-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,0,2,3]
; AVX2-FCP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
; AVX2-FCP-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -208,8 +208,8 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa (%rdi), %ymm6
; AVX512-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm7
+; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,3]
; AVX512-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm5
@@ -273,8 +273,8 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm6
; AVX512DQ-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm7
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,3]
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm5
@@ -338,8 +338,8 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm6
; AVX512BW-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
; AVX512BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512BW-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
; AVX512BW-NEXT: vextracti128 $1, %ymm7, %xmm7
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,3]
; AVX512BW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
; AVX512BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
; AVX512BW-NEXT: vextracti128 $1, %ymm5, %xmm5
@@ -403,8 +403,8 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm6
; AVX512DQ-BW-NEXT: vpermi2d %ymm5, %ymm6, %ymm1
; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm7 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[1,0,2,3,5,4,6,7]
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm7, %xmm7
+; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,3]
; AVX512DQ-BW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
; AVX512DQ-BW-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
; AVX512DQ-BW-NEXT: vextracti128 $1, %ymm5, %xmm5
@@ -562,15 +562,13 @@ define void @load_i32_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm10[2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm9[0,1,2],xmm5[3]
; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[0,0],ymm1[1,0],ymm0[4,4],ymm1[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm10[0,1],xmm5[3,2]
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm10[2,0],xmm5[3,2]
; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,1,0,1]
; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm9[3]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[2,0],ymm0[5,4],ymm1[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[2,3]
; AVX-NEXT: vmovaps %xmm2, (%rsi)
; AVX-NEXT: vmovaps %xmm7, (%rdx)
; AVX-NEXT: vmovaps %xmm8, (%rcx)
@@ -1154,8 +1152,8 @@ define void @load_i32_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-LABEL: load_i32_stride7_vf8:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps 160(%rdi), %ymm4
-; AVX-NEXT: vmovaps 128(%rdi), %ymm8
-; AVX-NEXT: vmovaps 64(%rdi), %ymm11
+; AVX-NEXT: vmovaps 128(%rdi), %ymm7
+; AVX-NEXT: vmovaps 64(%rdi), %ymm10
; AVX-NEXT: vmovaps 32(%rdi), %ymm0
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vmovaps 96(%rdi), %ymm12
@@ -1163,88 +1161,86 @@ define void @load_i32_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm12[0],ymm2[0],ymm12[2],ymm2[2]
; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vmovaps (%rdi), %xmm13
+; AVX-NEXT: vmovaps (%rdi), %xmm14
; AVX-NEXT: vmovaps 32(%rdi), %xmm9
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm13[0,1],xmm5[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm14[0,1],xmm5[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3,4,5,6,7]
-; AVX-NEXT: vmovaps 160(%rdi), %xmm5
-; AVX-NEXT: vmovaps 128(%rdi), %xmm6
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm6[1],xmm5[1]
-; AVX-NEXT: vmovaps 192(%rdi), %xmm10
-; AVX-NEXT: vinsertps {{.*#+}} xmm7 = zero,xmm7[1,2],xmm10[1]
-; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm7[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm12[1,1],ymm11[2,2],ymm12[5,5],ymm11[6,6]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm9[0],xmm13[1],xmm9[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[1,0],mem[3,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm14[0,1,2],ymm7[3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm8[2,3],ymm4[0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm8[0,0],ymm14[3,3],ymm8[4,4],ymm14[7,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,xmm14[1,2],xmm10[2]
-; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm14[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm13[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm14[0],xmm9[1],xmm14[2,3]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm2[3,1],ymm11[0,3],ymm2[7,5],ymm11[4,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm5[0,1,2],ymm3[3,4,5,6,7]
+; AVX-NEXT: vmovaps 160(%rdi), %xmm3
+; AVX-NEXT: vmovaps 128(%rdi), %xmm5
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm5[1],xmm3[1]
+; AVX-NEXT: vmovaps 192(%rdi), %xmm11
+; AVX-NEXT: vinsertps {{.*#+}} xmm8 = zero,xmm8[1,2],xmm11[1]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm8[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm12[1,1],ymm10[2,2],ymm12[5,5],ymm10[6,6]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm8[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm9[0],xmm14[1],xmm9[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[1,0],mem[3,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1,2],ymm8[3,4,5,6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm7[2,3],ymm4[0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm7[0,0],ymm13[3,3],ymm7[4,4],ymm13[7,7]
+; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
+; AVX-NEXT: vinsertps {{.*#+}} xmm13 = zero,xmm13[1,2],xmm11[2]
+; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm13[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm14[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm13[0],xmm9[1],xmm13[2,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm2[3,1],ymm10[0,3],ymm2[7,5],ymm10[4,7]
; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm12[2,1],ymm15[2,0],ymm12[6,5],ymm15[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm14[0,1],ymm15[2,3,4,5,6,7]
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm8[0],ymm4[0],ymm8[2],ymm4[2]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1,2],xmm10[3]
-; AVX-NEXT: vmovaps 192(%rdi), %ymm14
-; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1,2,3,4],ymm10[5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm13[0,1],ymm15[2,3,4,5,6,7]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm7[0],ymm4[0],ymm7[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
+; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm13[0,1,2],xmm11[3]
+; AVX-NEXT: vmovaps 192(%rdi), %ymm13
+; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm15[0,1,2,3,4],ymm11[5,6,7]
; AVX-NEXT: vmovaps 64(%rdi), %xmm15
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[1,0],ymm12[0,0],ymm11[5,4],ymm12[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm12[3,1],ymm11[0,2],ymm12[7,5],ymm11[4,6]
-; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm9[0,1,2],xmm13[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,0],ymm12[0,0],ymm10[5,4],ymm12[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[3,1],ymm10[0,2],ymm12[7,5],ymm10[4,6]
+; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm9[0,1,2],xmm14[3]
; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3,4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm14[0,1],ymm4[1,3],ymm14[4,5],ymm4[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm8[0,2],ymm12[2,0],ymm8[4,6],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm14[1,0],ymm4[2,0],ymm14[5,4],ymm4[6,4]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm8[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[3,0],ymm13[0,0],ymm8[7,4],ymm13[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0],ymm12[2,0],ymm8[6,4],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1],ymm10[2,3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm13[0,1],ymm4[1,3],ymm13[4,5],ymm4[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm7[0,2],ymm12[2,0],ymm7[4,6],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm13[1,0],ymm4[2,0],ymm13[5,4],ymm4[6,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm7[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[3,0],ymm14[0,0],ymm7[7,4],ymm14[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,0],ymm12[2,0],ymm7[6,4],ymm12[6,4]
; AVX-NEXT: vmovaps 96(%rdi), %xmm12
-; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm12[0,1,0,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm15[0,1,2],xmm13[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm12[0,1,0,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm15[0,1,2],xmm14[3]
; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm9 = mem[0],xmm9[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],xmm13[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm14[2,1],ymm4[3,3],ymm14[6,5],ymm4[7,7]
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm5[0],xmm6[1],xmm5[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],xmm14[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm13[2,1],ymm4[3,3],ymm13[6,5],ymm4[7,7]
+; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm3[0],xmm5[1],xmm3[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm9[1,0],ymm4[2,0],ymm9[5,4],ymm4[6,4]
; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm12[0,1,2],xmm15[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm0[0,0],ymm1[1,0],ymm0[4,4],ymm1[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm13[0,1],xmm9[3,2]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[0,0],ymm1[1,0],ymm0[4,4],ymm1[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm14[2,0],xmm9[3,2]
; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm14[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm14[3,0],ymm9[0,0],ymm14[7,4],ymm9[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1],ymm9[2,0],ymm5[4,5],ymm9[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[3]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[2,0],ymm0[5,4],ymm1[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX-NEXT: vmovaps %ymm3, (%rsi)
-; AVX-NEXT: vmovaps %ymm7, (%rdx)
-; AVX-NEXT: vmovaps %ymm10, (%rcx)
-; AVX-NEXT: vmovaps %ymm11, (%r8)
-; AVX-NEXT: vmovaps %ymm8, (%r9)
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm13[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm13[3,0],ymm1[0,0],ymm13[7,4],ymm1[4,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vmovaps %ymm6, (%rsi)
+; AVX-NEXT: vmovaps %ymm8, (%rdx)
+; AVX-NEXT: vmovaps %ymm11, (%rcx)
+; AVX-NEXT: vmovaps %ymm10, (%r8)
+; AVX-NEXT: vmovaps %ymm7, (%r9)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: vmovaps %ymm4, (%rax)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
@@ -2334,79 +2330,79 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-LABEL: load_i32_stride7_vf16:
; AVX: # %bb.0:
; AVX-NEXT: subq $456, %rsp # imm = 0x1C8
-; AVX-NEXT: vmovaps 256(%rdi), %ymm4
+; AVX-NEXT: vmovaps 32(%rdi), %ymm4
; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 224(%rdi), %ymm5
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 320(%rdi), %ymm15
-; AVX-NEXT: vmovaps 32(%rdi), %ymm2
+; AVX-NEXT: vmovaps (%rdi), %ymm6
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 96(%rdi), %ymm15
+; AVX-NEXT: vmovaps 256(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps (%rdi), %ymm1
+; AVX-NEXT: vmovaps 224(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%rdi), %ymm7
-; AVX-NEXT: vmovaps 80(%rdi), %xmm0
+; AVX-NEXT: vmovaps 320(%rdi), %ymm5
+; AVX-NEXT: vmovaps 304(%rdi), %xmm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[2],ymm0[2]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[2],ymm0[2]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps (%rdi), %xmm13
+; AVX-NEXT: vmovaps 224(%rdi), %xmm13
; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovaps 160(%rdi), %xmm2
+; AVX-NEXT: vmovaps 384(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps 128(%rdi), %xmm1
+; AVX-NEXT: vmovaps 352(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX-NEXT: vmovaps 192(%rdi), %xmm12
+; AVX-NEXT: vmovaps 416(%rdi), %xmm12
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm12[1]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 304(%rdi), %xmm0
+; AVX-NEXT: vmovaps 80(%rdi), %xmm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm15[0],ymm0[0],ymm15[2],ymm0[2]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm4[6],ymm5[7]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm4[6],ymm6[7]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 224(%rdi), %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3]
+; AVX-NEXT: vmovaps (%rdi), %xmm9
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm9[0,1],xmm1[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovaps 384(%rdi), %xmm2
+; AVX-NEXT: vmovaps 160(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps 352(%rdi), %xmm1
+; AVX-NEXT: vmovaps 128(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX-NEXT: vmovaps 416(%rdi), %xmm8
+; AVX-NEXT: vmovaps 192(%rdi), %xmm8
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm8[1]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 64(%rdi), %ymm5
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1],ymm5[2,2],ymm7[5,5],ymm5[6,6]
+; AVX-NEXT: vmovaps 288(%rdi), %ymm6
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm5[1,1],ymm6[2,2],ymm5[5,5],ymm6[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT: vmovaps 32(%rdi), %xmm11
+; AVX-NEXT: vmovaps 256(%rdi), %xmm11
; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm11[0],xmm13[1],xmm11[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovaps 160(%rdi), %ymm6
-; AVX-NEXT: vmovaps 128(%rdi), %ymm1
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm6[0,1]
+; AVX-NEXT: vmovaps 384(%rdi), %ymm7
+; AVX-NEXT: vmovaps 352(%rdi), %ymm1
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm7[0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm1[0,0],ymm3[3,3],ymm1[4,4],ymm3[7,7]
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = zero,xmm3[1,2],xmm12[2]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 288(%rdi), %ymm3
+; AVX-NEXT: vmovaps 64(%rdi), %ymm3
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,1],ymm3[2,2],ymm15[5,5],ymm3[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT: vmovaps 256(%rdi), %xmm9
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm9[0],xmm10[1],xmm9[2,3]
+; AVX-NEXT: vmovaps 32(%rdi), %xmm10
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm10[0],xmm9[1],xmm10[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,0],mem[3,3]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovaps 384(%rdi), %ymm4
-; AVX-NEXT: vmovaps 352(%rdi), %ymm0
+; AVX-NEXT: vmovaps 160(%rdi), %ymm4
+; AVX-NEXT: vmovaps 128(%rdi), %ymm0
; AVX-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm0[2,3],ymm4[0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[0,0],ymm14[3,3],ymm0[4,4],ymm14[7,7]
; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
@@ -2417,17 +2413,17 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm13[2,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm11[1],xmm2[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[3,1],ymm5[0,3],ymm14[7,5],ymm5[4,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm7[2,1],ymm14[2,0],ymm7[6,5],ymm14[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[3,1],ymm6[0,3],ymm14[7,5],ymm6[4,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm5[2,1],ymm14[2,0],ymm5[6,5],ymm14[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm14[2,3,4,5,6,7]
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm1[0],ymm6[0],ymm1[2],ymm6[2]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm1[0],ymm7[0],ymm1[2],ymm7[2]
; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm14[0,1,2],xmm12[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm12[5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm10[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm9[1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm9[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm10[1],xmm2[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[3,1],ymm3[0,3],ymm12[7,5],ymm3[4,7]
; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm15[2,1],ymm12[2,0],ymm15[6,5],ymm12[6,4]
@@ -2438,134 +2434,130 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm8[5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[1,0],ymm7[0,0],ymm5[5,4],ymm7[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm7[3,1],ymm2[0,2],ymm7[7,5],ymm2[4,6]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm5[0,0],ymm6[5,4],ymm5[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[3,1],ymm2[0,2],ymm5[7,5],ymm2[4,6]
; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm11[0,1,2],xmm13[3]
; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,2,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm2[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 192(%rdi), %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm2[0,1],ymm6[1,3],ymm2[4,5],ymm6[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[0,2],ymm7[2,0],ymm1[4,6],ymm7[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm7[5,6,7]
+; AVX-NEXT: vmovaps 416(%rdi), %ymm2
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm2[0,1],ymm7[1,3],ymm2[4,5],ymm7[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm1[0,2],ymm6[2,0],ymm1[4,6],ymm6[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[1,0],ymm15[0,0],ymm3[5,4],ymm15[4,4]
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm15[3,1],ymm3[0,2],ymm15[7,5],ymm3[4,6]
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm9[0,1,2],xmm10[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm10[0,1,2],xmm9[3]
; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,2,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 416(%rdi), %ymm5
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm5[0,1],ymm4[1,3],ymm5[4,5],ymm4[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[0,2],ymm8[2,0],ymm0[4,6],ymm8[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm8[5,6,7]
+; AVX-NEXT: vmovaps 192(%rdi), %ymm6
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,1],ymm4[1,3],ymm6[4,5],ymm4[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm0[0,2],ymm5[2,0],ymm0[4,6],ymm5[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm3[0,0],ymm1[7,4],ymm3[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[1,0],ymm6[2,0],ymm2[5,4],ymm6[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[1,0],ymm7[2,0],ymm2[5,4],ymm7[6,4]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm3[2,0],ymm1[6,4],ymm3[6,4]
-; AVX-NEXT: vmovaps 64(%rdi), %xmm3
-; AVX-NEXT: vmovaps 96(%rdi), %xmm10
-; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[0,1,0,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm3[0,1,2],xmm8[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = mem[0],xmm11[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm11[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm0[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm8[0,0],ymm0[7,4],ymm8[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm5[1,0],ymm4[2,0],ymm5[5,4],ymm4[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm8[2,0],ymm0[6,4],ymm8[6,4]
-; AVX-NEXT: vmovaps 320(%rdi), %xmm8
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm8[0,1,0,1]
-; AVX-NEXT: vmovaps 288(%rdi), %xmm12
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm12[0,1,2],xmm11[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[2,3,2,3]
+; AVX-NEXT: vmovaps 320(%rdi), %xmm5
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm5[0,1,0,1]
+; AVX-NEXT: vmovaps 288(%rdi), %xmm8
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm8[0,1,2],xmm3[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm11[2,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm9 = mem[0],xmm9[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],xmm11[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm9[0,1],xmm3[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm3[0,0],ymm0[7,4],ymm3[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm6[1,0],ymm4[2,0],ymm6[5,4],ymm4[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,0],ymm0[6,4],ymm3[6,4]
+; AVX-NEXT: vmovaps 64(%rdi), %xmm11
+; AVX-NEXT: vmovaps 96(%rdi), %xmm9
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm9[0,1,0,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm11[0,1,2],xmm3[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm10[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm10 = mem[0],xmm10[1],mem[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm10[0,1],xmm3[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm2[2,1],ymm6[3,3],ymm2[6,5],ymm6[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[2,1],ymm7[3,3],ymm2[6,5],ymm7[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm1[0],xmm0[1],xmm1[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm9[1,0],ymm6[2,0],ymm9[5,4],ymm6[6,4]
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm10[0,1,2],xmm3[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm14[0],xmm0[1],xmm14[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm7[1,0],ymm3[2,0],ymm7[5,4],ymm3[6,4]
+; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm5[0,1,2],xmm8[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm14[0,0],ymm13[1,0],ymm14[4,4],ymm13[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm9[0,1],xmm3[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm3[0,1,2,3],ymm6[4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[2,1],ymm4[3,3],ymm5[6,5],ymm4[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm13[0,0],ymm12[1,0],ymm13[4,4],ymm12[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm8[2,0],xmm7[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm6[2,1],ymm4[3,3],ymm6[6,5],ymm4[7,7]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0],xmm3[1],xmm7[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm6[1,0],ymm4[2,0],ymm6[5,4],ymm4[6,4]
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm8[0,1,2],xmm12[3]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm3[0],xmm1[1],xmm3[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[1,0],ymm4[2,0],ymm7[5,4],ymm4[6,4]
+; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm9[0,1,2],xmm11[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm11[0,0],ymm12[1,0],ymm11[4,4],ymm12[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm9[0,1],xmm6[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm2[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[3,0],ymm6[0,0],ymm2[7,4],ymm6[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm0[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm1[1],xmm6[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,0],ymm6[4,5],ymm2[6,4]
-; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX-NEXT: # xmm6 = mem[0,1,0,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm10[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm14[1,0],ymm13[2,0],ymm14[5,4],ymm13[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm9[0,1],xmm6[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm5[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[3,0],ymm6[0,0],ymm5[7,4],ymm6[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm3[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,0],ymm6[4,5],ymm5[6,4]
-; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX-NEXT: # xmm6 = mem[0,1,0,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1,2],xmm8[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm11[1,0],ymm12[2,0],ymm11[5,4],ymm12[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm10[0,0],ymm11[1,0],ymm10[4,4],ymm11[5,4]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm8[2,0],xmm7[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3],ymm4[4,5,6,7]
+; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; AVX-NEXT: # xmm7 = mem[0,1,0,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm13[1,0],ymm12[2,0],ymm13[5,4],ymm12[6,4]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm7[2,0],xmm5[2,3]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm2[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[3,0],ymm7[0,0],ymm2[7,4],ymm7[4,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm0[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0],xmm14[1],xmm7[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,0],ymm7[4,5],ymm2[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX-NEXT: # xmm5 = mem[0,1,0,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0,1,2],xmm9[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm10[1,0],ymm11[2,0],ymm10[5,4],ymm11[6,4]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm7[2,0],xmm5[2,3]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm7[0,0],ymm6[7,4],ymm7[4,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm1[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0],xmm3[1],xmm7[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, (%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm6, 32(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm6, (%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, (%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 32(%rcx)
+; AVX-NEXT: vmovaps %ymm0, 32(%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, (%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 32(%r8)
+; AVX-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, (%r8)
-; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 32(%r9)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 32(%r8)
+; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, (%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 32(%r9)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm4, 32(%rax)
-; AVX-NEXT: vmovaps %ymm15, (%rax)
+; AVX-NEXT: vmovaps %ymm4, (%rax)
+; AVX-NEXT: vmovaps %ymm15, 32(%rax)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm5, 32(%rax)
-; AVX-NEXT: vmovaps %ymm2, (%rax)
+; AVX-NEXT: vmovaps %ymm5, (%rax)
+; AVX-NEXT: vmovaps %ymm2, 32(%rax)
; AVX-NEXT: addq $456, %rsp # imm = 0x1C8
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -4893,26 +4885,26 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX-LABEL: load_i32_stride7_vf32:
; AVX: # %bb.0:
-; AVX-NEXT: subq $1464, %rsp # imm = 0x5B8
+; AVX-NEXT: subq $1432, %rsp # imm = 0x598
; AVX-NEXT: vmovaps 480(%rdi), %ymm4
; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 448(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 544(%rdi), %ymm7
+; AVX-NEXT: vmovaps 544(%rdi), %ymm5
; AVX-NEXT: vmovaps 32(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%rdi), %ymm6
+; AVX-NEXT: vmovaps 96(%rdi), %ymm12
; AVX-NEXT: vmovaps 80(%rdi), %xmm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm12[0],ymm0[0],ymm12[2],ymm0[2]
+; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps (%rdi), %xmm9
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm9[0,1],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rdi), %xmm8
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1],xmm1[2,3]
+; AVX-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX-NEXT: vmovaps 160(%rdi), %xmm2
@@ -4920,20 +4912,19 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps 128(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX-NEXT: vmovaps 192(%rdi), %xmm10
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm10[1]
-; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps 192(%rdi), %xmm7
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm7[1]
+; AVX-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 528(%rdi), %xmm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[2],ymm0[2]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[2],ymm0[2]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm4[6],ymm3[7]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 448(%rdi), %xmm8
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm8[0,1],xmm1[2,3]
+; AVX-NEXT: vmovaps 448(%rdi), %xmm10
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX-NEXT: vmovaps 608(%rdi), %xmm2
@@ -4941,9 +4932,9 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps 576(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX-NEXT: vmovaps 640(%rdi), %xmm4
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm4[1]
-; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps 640(%rdi), %xmm9
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm9[1]
+; AVX-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -4953,24 +4944,24 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vmovaps 224(%rdi), %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm5[0,1],xmm0[2,3]
-; AVX-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps 224(%rdi), %xmm11
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm11[0,1],xmm0[2,3]
+; AVX-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; AVX-NEXT: vmovaps 320(%rdi), %ymm13
+; AVX-NEXT: vmovaps 320(%rdi), %ymm4
; AVX-NEXT: vmovaps 304(%rdi), %xmm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm1[0],ymm13[2],ymm1[2]
-; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vmovaps 384(%rdi), %xmm2
-; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps 352(%rdi), %xmm1
+; AVX-NEXT: vmovaps 384(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX-NEXT: vmovaps 416(%rdi), %xmm11
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm11[1]
-; AVX-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps 352(%rdi), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
+; AVX-NEXT: vmovaps 416(%rdi), %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm3[1]
+; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -4984,474 +4975,470 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; AVX-NEXT: vmovaps 768(%rdi), %ymm3
+; AVX-NEXT: vmovaps 768(%rdi), %ymm14
; AVX-NEXT: vmovaps 752(%rdi), %xmm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm3[0],ymm1[0],ymm3[2],ymm1[2]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm14[0],ymm1[0],ymm14[2],ymm1[2]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovaps 832(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 800(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX-NEXT: vmovaps 864(%rdi), %xmm2
-; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
+; AVX-NEXT: vmovaps 864(%rdi), %xmm6
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm6[1]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 64(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm6[1,1],ymm0[2,2],ymm6[5,5],ymm0[6,6]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm12[1,1],ymm0[2,2],ymm12[5,5],ymm0[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX-NEXT: vmovaps 32(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm9[1],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX-NEXT: vmovaps 160(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 128(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,0],ymm1[3,3],ymm2[4,4],ymm1[7,7]
+; AVX-NEXT: vmovaps 128(%rdi), %ymm15
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm15[2,3],ymm1[0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm15[0,0],ymm1[3,3],ymm15[4,4],ymm1[7,7]
+; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm10[2]
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm7[2]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 512(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1],ymm0[2,2],ymm7[5,5],ymm0[6,6]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm5[1,1],ymm0[2,2],ymm5[5,5],ymm0[6,6]
+; AVX-NEXT: vmovaps %ymm5, %ymm7
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT: vmovaps 480(%rdi), %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0],xmm8[1],xmm10[2,3]
+; AVX-NEXT: vmovaps 480(%rdi), %xmm1
+; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm1[0],xmm10[1],xmm1[2,3]
; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps %xmm8, %xmm7
-; AVX-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovaps 608(%rdi), %ymm14
-; AVX-NEXT: vmovaps 576(%rdi), %ymm12
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm12[2,3],ymm14[0,1]
-; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm12[0,0],ymm2[3,3],ymm12[4,4],ymm2[7,7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vinsertps {{.*#+}} xmm2 = zero,xmm2[1,2],xmm4[2]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],mem[3,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm0[3,4,5,6,7]
+; AVX-NEXT: vmovaps 608(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 576(%rdi), %ymm12
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm12[2,3],ymm0[0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm12[0,0],ymm5[3,3],ymm12[4,4],ymm5[7,7]
+; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
+; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1,2],xmm9[2]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 288(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm13[1,1],ymm0[2,2],ymm13[5,5],ymm0[6,6]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[1,1],ymm0[2,2],ymm4[5,5],ymm0[6,6]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX-NEXT: vmovaps 256(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0],xmm5[1],xmm0[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],mem[3,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vmovaps 384(%rdi), %ymm0
+; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm0[0],xmm11[1],xmm0[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,0],mem[3,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3,4,5,6,7]
+; AVX-NEXT: vmovaps 384(%rdi), %ymm1
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 352(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 352(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm2[2,3],ymm0[0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm2[0,0],ymm5[3,3],ymm2[4,4],ymm5[7,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm0[0,0],ymm5[3,3],ymm0[4,4],ymm5[7,7]
; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1,2],xmm11[2]
+; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1,2],xmm3[2]
; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm5[5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 736(%rdi), %ymm8
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,1],ymm8[2,2],ymm3[5,5],ymm8[6,6]
-; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps %ymm3, %ymm9
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX-NEXT: vmovaps 704(%rdi), %xmm6
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm6[0],xmm2[1],xmm6[2,3]
-; AVX-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[1,0],mem[3,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vmovaps 832(%rdi), %ymm5
-; AVX-NEXT: vmovaps 800(%rdi), %ymm15
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm15[2,3],ymm5[0,1]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 736(%rdi), %ymm5
+; AVX-NEXT: vmovaps %ymm14, %ymm3
+; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm14[1,1],ymm5[2,2],ymm14[5,5],ymm5[6,6]
; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm15[0,0],ymm13[3,3],ymm15[4,4],ymm13[7,7]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX-NEXT: vinsertps {{.*#+}} xmm13 = zero,xmm13[1,2],xmm4[2]
-; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; AVX-NEXT: # xmm0 = mem[2,3,2,3]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX-NEXT: vmovaps 704(%rdi), %xmm4
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT: # ymm13 = ymm13[3,1],mem[0,3],ymm13[7,5],mem[4,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm11[2,1],ymm13[2,0],ymm11[6,5],ymm13[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3,4,5,6,7]
-; AVX-NEXT: vmovups (%rsp), %ymm3 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT: # ymm13 = ymm3[0],mem[0],ymm3[2],mem[2]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
-; AVX-NEXT: # xmm13 = xmm13[0,1,2],mem[3]
-; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm10[1],xmm0[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[3,1],ymm10[0,3],ymm13[7,5],ymm10[4,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm7[2,1],ymm13[2,0],ymm7[6,5],ymm13[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3,4,5,6,7]
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm12[0],ymm14[0],ymm12[2],ymm14[2]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
-; AVX-NEXT: # xmm13 = xmm13[0,1,2],mem[3]
-; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[3,1],ymm8[0,3],ymm13[7,5],ymm8[4,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm9[2,1],ymm13[2,0],ymm9[6,5],ymm13[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3,4,5,6,7]
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm15[0],ymm5[0],ymm15[2],ymm5[2]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm13[0,1,2],xmm4[3]
-; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm4[0],xmm1[1],xmm4[2,3]
+; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[1,0],mem[3,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm2[3,4,5,6,7]
+; AVX-NEXT: vmovaps 832(%rdi), %ymm13
+; AVX-NEXT: vmovaps 800(%rdi), %ymm2
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm2[2,3],ymm13[0,1]
+; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm2[0,0],ymm14[3,3],ymm2[4,4],ymm14[7,7]
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,xmm14[1,2],xmm6[2]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm14[5,6,7]
+; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; AVX-NEXT: # xmm11 = mem[2,3,2,3]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0],xmm0[1],xmm11[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[3,1],ymm9[0,3],ymm14[7,5],ymm9[4,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm8[2,1],ymm14[2,0],ymm8[6,5],ymm14[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm14[2,3,4,5,6,7]
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
+; AVX-NEXT: # ymm14 = ymm15[0],mem[0],ymm15[2],mem[2]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload
+; AVX-NEXT: # xmm14 = xmm14[0,1,2],mem[3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm14[5,6,7]
+; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm10[2,3,2,3]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0],xmm15[1],xmm11[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[3,1],ymm14[0,3],ymm8[7,5],ymm14[4,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm4[2,1],ymm8[2,0],ymm4[6,5],ymm8[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[3,1],ymm10[0,3],ymm14[7,5],ymm10[4,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm7[2,1],ymm14[2,0],ymm7[6,5],ymm14[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm14[2,3,4,5,6,7]
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm14 # 32-byte Folded Reload
+; AVX-NEXT: # ymm14 = ymm12[0],mem[0],ymm12[2],mem[2]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload
+; AVX-NEXT: # xmm14 = xmm14[0,1,2],mem[3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm14[5,6,7]
+; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm1[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0],xmm4[1],xmm11[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[3,1],ymm5[0,3],ymm14[7,5],ymm5[4,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm3[2,1],ymm14[2,0],ymm3[6,5],ymm14[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm14[2,3,4,5,6,7]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm2[0],ymm13[0],ymm2[2],ymm13[2]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm14[0,1,2],xmm6[3]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1,2,3,4],ymm4[5,6,7]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm6[2,3,2,3]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[3,1],ymm3[0,3],ymm11[7,5],ymm3[4,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm2[2,1],ymm11[2,0],ymm2[6,5],ymm11[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm11[2,3,4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm6[0],ymm13[0],ymm6[2],ymm13[2]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
-; AVX-NEXT: # xmm8 = xmm8[0,1,2],mem[3]
-; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm11 = ymm13[0],ymm5[0],ymm13[2],ymm5[2]
+; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
+; AVX-NEXT: # xmm11 = xmm11[0,1,2],mem[3]
+; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm11[5,6,7]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm9[1,0],ymm8[0,0],ymm9[5,4],ymm8[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[3,1],ymm4[0,2],ymm8[7,5],ymm4[4,6]
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
+; AVX-NEXT: # xmm9 = xmm0[0,1,2],mem[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 192(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,1],ymm14[1,3],ymm0[4,5],ymm14[5,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm11[0,0],ymm0[5,4],ymm11[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm11[3,1],ymm0[0,2],ymm11[7,5],ymm0[4,6]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm8 # 16-byte Folded Reload
-; AVX-NEXT: # xmm8 = xmm1[0,1,2],mem[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,2],ymm11[2,0],ymm0[4,6],ymm11[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm11[5,6,7]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm10[1,0],ymm7[0,0],ymm10[5,4],ymm7[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[3,1],ymm4[0,2],ymm7[7,5],ymm4[4,6]
+; AVX-NEXT: vmovaps %xmm15, %xmm10
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm15[0,1,2],mem[3]
; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 192(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[0,1],ymm9[1,3],ymm1[4,5],ymm9[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm3[0,2],ymm8[2,0],ymm3[4,6],ymm8[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm4[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 640(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[1,0],ymm7[0,0],ymm10[5,4],ymm7[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[3,1],ymm0[0,2],ymm7[7,5],ymm0[4,6]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm8 # 16-byte Folded Reload
-; AVX-NEXT: # xmm8 = xmm11[0,1,2],mem[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,1],ymm4[1,3],ymm0[4,5],ymm4[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm12[0,2],ymm11[2,0],ymm12[4,6],ymm11[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm11[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm3[1,0],ymm2[0,0],ymm3[5,4],ymm2[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm2[3,1],ymm7[0,2],ymm2[7,5],ymm7[4,6]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm1[0,1,2],xmm6[3]
; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 640(%rdi), %ymm1
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,1],ymm8[1,3],ymm1[4,5],ymm8[5,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[0,2],ymm10[2,0],ymm12[4,6],ymm10[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm10[5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm14[1,0],ymm4[0,0],ymm14[5,4],ymm4[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm4[3,1],ymm0[0,2],ymm4[7,5],ymm0[4,6]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm2[0,1,2],xmm5[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 416(%rdi), %ymm14
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm14[0,1],ymm13[1,3],ymm14[4,5],ymm13[5,7]
-; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm6[0,2],ymm10[2,0],ymm6[4,6],ymm10[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm10[5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 416(%rdi), %ymm15
+; AVX-NEXT: vmovaps %ymm5, %ymm8
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm15[0,1],ymm5[1,3],ymm15[4,5],ymm5[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm13[0,2],ymm11[2,0],ymm13[4,6],ymm11[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm11[5,6,7]
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm2[0,0],ymm0[5,4],ymm2[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,1],ymm0[0,2],ymm2[7,5],ymm0[4,6]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
-; AVX-NEXT: # xmm5 = mem[0,1,2],xmm2[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 864(%rdi), %ymm5
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm7[1,3],ymm5[4,5],ymm7[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm15[0,2],ymm6[2,0],ymm15[4,6],ymm6[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm0[3,1],ymm5[0,2],ymm0[7,5],ymm5[4,6]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
+; AVX-NEXT: # xmm6 = mem[0,1,2],xmm0[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm5[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 864(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm12[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm12[3,0],ymm0[0,0],ymm12[7,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm1[1,0],ymm8[2,0],ymm1[5,4],ymm8[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,0],ymm0[6,4],ymm3[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm0[0,1],ymm9[1,3],ymm0[4,5],ymm9[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm0[0,2],ymm7[2,0],ymm0[4,6],ymm7[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm7[5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm12[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm12[3,0],ymm6[0,0],ymm12[7,4],ymm6[4,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[1,0],ymm4[2,0],ymm5[5,4],ymm4[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm6[2,0],ymm0[6,4],ymm6[6,4]
; AVX-NEXT: vmovaps 544(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm1[0,1,0,1]
-; AVX-NEXT: vmovaps 512(%rdi), %xmm6
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm6[0,1,2],xmm10[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm11[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm12 = mem[0],xmm12[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm12[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm1[0,1,0,1]
+; AVX-NEXT: vmovaps 512(%rdi), %xmm7
+; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm7[0,1,2],xmm11[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm10[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm11[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups (%rsp), %ymm2 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,0],ymm0[0,0],ymm2[7,4],ymm0[4,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm3[1,0],ymm9[2,0],ymm3[5,4],ymm9[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[2,0],ymm4[2,0],ymm0[6,4],ymm4[6,4]
-; AVX-NEXT: vmovaps 64(%rdi), %xmm0
-; AVX-NEXT: vmovaps 96(%rdi), %xmm4
-; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm4[0,1,0,1]
-; AVX-NEXT: vmovaps %xmm4, (%rsp) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm0[0,1,2],xmm12[3]
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX-NEXT: # xmm13 = mem[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm13 = mem[0],xmm13[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm13[0,1],xmm12[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm10[4,5,6,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm1[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[3,0],ymm10[0,0],ymm1[7,4],ymm10[4,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm14[1,0],ymm11[2,0],ymm14[5,4],ymm11[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,0],ymm10[2,0],ymm2[6,4],ymm10[6,4]
-; AVX-NEXT: vmovaps 320(%rdi), %xmm1
-; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm1[0,1,0,1]
-; AVX-NEXT: vmovaps 288(%rdi), %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm2[0,1,2],xmm13[3]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,0],ymm14[2,0],ymm10[5,4],ymm14[6,4]
+; AVX-NEXT: vmovaps %ymm14, %ymm6
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
+; AVX-NEXT: vmovaps 64(%rdi), %xmm1
+; AVX-NEXT: vmovaps 96(%rdi), %xmm4
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm4[0,1,0,1]
+; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm1[0,1,2],xmm3[3]
; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; AVX-NEXT: # xmm14 = mem[2,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm14 = mem[0],xmm14[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm14[0,1],xmm13[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm15[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,0],ymm12[0,0],ymm15[7,4],ymm12[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm5[1,0],ymm7[2,0],ymm5[5,4],ymm7[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm1[2,0],ymm12[2,0],ymm1[6,4],ymm12[6,4]
-; AVX-NEXT: vmovaps 768(%rdi), %xmm1
-; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm1[0,1,0,1]
-; AVX-NEXT: vmovaps 736(%rdi), %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm13[0,1,2],xmm14[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm14[0,1],xmm3[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm13[3,0],ymm0[0,0],ymm13[7,4],ymm0[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm15[1,0],ymm8[2,0],ymm15[5,4],ymm8[6,4]
+; AVX-NEXT: vmovaps %ymm15, %ymm11
+; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,0],ymm0[6,4],ymm3[6,4]
+; AVX-NEXT: vmovaps 320(%rdi), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm2[0,1,0,1]
+; AVX-NEXT: vmovaps 288(%rdi), %xmm3
+; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm3[0,1,2],xmm14[3]
; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
; AVX-NEXT: # xmm15 = mem[2,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm15 = mem[0],xmm15[1],mem[2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm15[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm12[4,5,6,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm3[2,1],ymm9[3,3],ymm3[6,5],ymm9[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload
-; AVX-NEXT: # xmm15 = xmm1[0],mem[1],xmm1[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm15[1,0],ymm12[2,0],ymm15[5,4],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm1[0,0],mem[1,0],ymm1[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm15[0,1],xmm0[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm3[2,1],ymm8[3,3],ymm3[6,5],ymm8[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm8[0],xmm7[1],xmm8[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,0],ymm0[2,0],ymm15[5,4],ymm0[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm10[0,1,2],xmm6[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm9[0,0],ymm12[1,0],ymm9[4,4],ymm12[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm15[0,1],xmm6[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,0],ymm0[0,0],ymm2[7,4],ymm0[4,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm8[1,0],ymm9[2,0],ymm8[5,4],ymm9[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[2,0],ymm2[2,0],ymm0[6,4],ymm2[6,4]
+; AVX-NEXT: vmovaps 768(%rdi), %xmm0
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[0,1,0,1]
+; AVX-NEXT: vmovaps 736(%rdi), %xmm2
+; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm2[0,1,2],xmm15[3]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
+; AVX-NEXT: # xmm13 = mem[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = mem[0],xmm13[1],mem[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm13[0,1],xmm15[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm14[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm10[2,1],ymm6[3,3],ymm10[6,5],ymm6[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm14 # 16-byte Folded Reload
+; AVX-NEXT: # xmm14 = xmm0[0],mem[1],xmm0[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm14[1,0],ymm13[2,0],ymm14[5,4],ymm13[6,4]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm11[3,3],ymm0[6,5],ymm11[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload
-; AVX-NEXT: # xmm15 = mem[0],xmm1[1],mem[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,0],ymm0[2,0],ymm15[5,4],ymm0[6,4]
-; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX-NEXT: # xmm2 = mem[0,1,2],xmm2[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm1[0,0],mem[1,0],ymm1[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm5[2,1],mem[3,3],ymm5[6,5],mem[7,7]
-; AVX-NEXT: vmovaps %ymm5, %ymm14
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0],xmm1[1],xmm4[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,0],ymm0[2,0],ymm2[5,4],ymm0[6,4]
+; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX-NEXT: # ymm14 = ymm0[0,0],mem[1,0],ymm0[4,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm14[2,0],xmm1[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm13[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm13 # 32-byte Folded Reload
+; AVX-NEXT: # ymm13 = ymm5[2,1],mem[3,3],ymm5[6,5],mem[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm15[0],xmm10[1],xmm15[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm14[1,0],ymm13[2,0],ymm14[5,4],ymm13[6,4]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm6[0,1,2],xmm13[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm5[0,0],ymm13[1,0],ymm5[4,4],ymm13[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm11[0,1],xmm2[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm3[3,0],ymm0[0,0],ymm3[7,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm7[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm6[0,1,2],xmm7[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[0,0],ymm4[1,0],ymm0[4,4],ymm4[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm14[2,0],xmm7[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm13[4,5,6,7]
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm13 # 32-byte Folded Reload
+; AVX-NEXT: # ymm13 = ymm11[2,1],mem[3,3],ymm11[6,5],mem[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm14 # 16-byte Folded Reload
+; AVX-NEXT: # xmm14 = mem[0],xmm7[1],mem[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm14[1,0],ymm13[2,0],ymm14[5,4],ymm13[6,4]
+; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX-NEXT: # xmm3 = mem[0,1,2],xmm3[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm14 # 32-byte Folded Reload
+; AVX-NEXT: # ymm14 = ymm11[0,0],mem[1,0],ymm11[4,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm14[2,0],xmm3[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm3[0,1,2,3],ymm13[4,5,6,7]
+; AVX-NEXT: vmovaps %ymm8, %ymm7
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm8[2,1],ymm9[3,3],ymm8[6,5],ymm9[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm9[0],xmm8[1],xmm9[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm12[1,0],ymm3[2,0],ymm12[5,4],ymm3[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm1[0,0],ymm14[1,0],ymm1[4,4],ymm14[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm12[2,0],xmm2[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; AVX-NEXT: # xmm2 = mem[0,1,0,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm10[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm9[1,0],ymm12[2,0],ymm9[5,4],ymm12[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm6[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm0[1,0],ymm4[2,0],ymm0[5,4],ymm4[6,4]
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm14[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm14[3,0],ymm0[0,0],ymm14[7,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm3[2,0],xmm2[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm0[3,0],ymm3[0,0],ymm0[7,4],ymm3[4,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm10[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0],xmm15[1],xmm4[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm3[4,5,6,7]
; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; AVX-NEXT: # xmm2 = mem[0,1,0,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm6[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[1,0],ymm13[2,0],ymm5[5,4],ymm13[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1,2],xmm5[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm14[2,0],ymm1[5,4],ymm14[6,4]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[3,0],ymm1[0,0],ymm2[7,4],ymm1[4,4]
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX-NEXT: # xmm2 = mem[2,3,2,3]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX-NEXT: # xmm2 = xmm2[0],mem[1],xmm2[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[2,0],xmm0[2,3]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm7[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm7[3,0],ymm2[0,0],ymm7[7,4],ymm2[4,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm8[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0],xmm9[1],xmm4[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,0],ymm4[4,5],ymm2[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; AVX-NEXT: # xmm2 = mem[0,1,0,1]
-; AVX-NEXT: vblendps $8, (%rsp), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
; AVX-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm4[1,0],mem[2,0],ymm4[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX-NEXT: # ymm4 = ymm1[1,0],mem[2,0],ymm1[5,4],mem[6,4]
; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm4[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm2[0,0],ymm4[7,4],ymm2[4,4]
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX-NEXT: # xmm4 = mem[2,3,2,3]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,0],ymm4[4,5],ymm2[6,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm4[2,0],xmm2[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm1[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm1[3,0],ymm4[0,0],ymm1[7,4],ymm4[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX-NEXT: # xmm5 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX-NEXT: # xmm5 = xmm5[0],mem[1],xmm5[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; AVX-NEXT: # xmm4 = mem[0,1,0,1]
; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
; AVX-NEXT: # xmm4 = xmm4[0,1,2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = ymm5[1,0],mem[2,0],ymm5[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm5 # 32-byte Folded Reload
+; AVX-NEXT: # ymm5 = ymm11[1,0],mem[2,0],ymm11[5,4],mem[6,4]
; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 96(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 32(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 64(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, (%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 96(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 32(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 64(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, (%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 32(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 96(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 64(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, (%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 96(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 32(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 64(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, (%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 96(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 32(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, (%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 64(%r9)
-; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm11, 96(%rax)
-; AVX-NEXT: vmovaps %ymm15, 32(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, 64(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm4, (%rax)
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm5[2,0],xmm4[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm1[3,0],ymm5[0,0],ymm1[7,4],ymm5[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX-NEXT: # xmm6 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
+; AVX-NEXT: # xmm6 = xmm6[0],mem[1],xmm6[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,0],ymm6[4,5],ymm5[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 96(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 32(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm5, 64(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm5, (%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 96(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 32(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 64(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, (%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 96(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 64(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, (%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 96(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 32(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 64(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, (%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 96(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 32(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, (%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 64(%r9)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm2, 32(%rax)
+; AVX-NEXT: vmovaps %ymm12, 96(%rax)
+; AVX-NEXT: vmovaps %ymm13, 32(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 64(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, (%rax)
+; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vmovaps %ymm4, 32(%rax)
+; AVX-NEXT: vmovaps %ymm2, (%rax)
; AVX-NEXT: vmovaps %ymm0, 96(%rax)
; AVX-NEXT: vmovaps %ymm3, 64(%rax)
-; AVX-NEXT: addq $1464, %rsp # imm = 0x5B8
+; AVX-NEXT: addq $1432, %rsp # imm = 0x598
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
@@ -9949,27 +9936,27 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 672(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 768(%rdi), %ymm5
+; AVX-NEXT: vmovaps 768(%rdi), %ymm11
; AVX-NEXT: vmovaps 256(%rdi), %ymm4
; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 224(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 320(%rdi), %ymm7
+; AVX-NEXT: vmovaps 320(%rdi), %ymm6
; AVX-NEXT: vmovaps 304(%rdi), %xmm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[2],ymm0[2]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[2],ymm0[2]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6],ymm1[7]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 224(%rdi), %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3]
+; AVX-NEXT: vmovaps 224(%rdi), %xmm10
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm10[0,1],xmm1[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovaps 384(%rdi), %xmm4
-; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps 352(%rdi), %xmm1
+; AVX-NEXT: vmovaps 384(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
+; AVX-NEXT: vmovaps 352(%rdi), %xmm4
+; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm1[1]
; AVX-NEXT: vmovaps 416(%rdi), %xmm4
; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm4[1]
@@ -9978,22 +9965,21 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 752(%rdi), %xmm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[2],ymm0[2]
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[2],ymm0[2]
+; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm2[6],ymm3[7]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 672(%rdi), %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
+; AVX-NEXT: vmovaps 672(%rdi), %xmm15
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovaps 832(%rdi), %xmm2
-; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps 800(%rdi), %xmm1
+; AVX-NEXT: vmovaps 832(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX-NEXT: vmovaps 864(%rdi), %xmm2
+; AVX-NEXT: vmovaps 800(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
+; AVX-NEXT: vmovaps 864(%rdi), %xmm13
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm13[1]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -10051,27 +10037,27 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%rdi), %ymm0
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps (%rdi), %ymm1
+; AVX-NEXT: vmovaps 32(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6],ymm1[7]
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovaps (%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; AVX-NEXT: vmovaps 96(%rdi), %ymm15
+; AVX-NEXT: vmovaps 96(%rdi), %ymm14
; AVX-NEXT: vmovaps 80(%rdi), %xmm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm15[0],ymm1[0],ymm15[2],ymm1[2]
-; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm14[0],ymm1[0],ymm14[2],ymm1[2]
+; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vmovaps 160(%rdi), %xmm2
-; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps 128(%rdi), %xmm1
+; AVX-NEXT: vmovaps 160(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; AVX-NEXT: vmovaps 128(%rdi), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; AVX-NEXT: vmovaps 192(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
@@ -10113,22 +10099,21 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovaps 896(%rdi), %xmm12
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm12[0,1],xmm0[2,3]
-; AVX-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; AVX-NEXT: vmovaps 992(%rdi), %ymm4
+; AVX-NEXT: vmovaps 992(%rdi), %ymm5
; AVX-NEXT: vmovaps 976(%rdi), %xmm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovaps 1056(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 1024(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
-; AVX-NEXT: vmovaps 1088(%rdi), %xmm10
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm10[1]
-; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps 1088(%rdi), %xmm8
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm8[1]
+; AVX-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -10142,31 +10127,31 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; AVX-NEXT: vmovaps 1440(%rdi), %ymm8
+; AVX-NEXT: vmovaps 1440(%rdi), %ymm4
; AVX-NEXT: vmovaps 1424(%rdi), %xmm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
-; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vmovaps 1504(%rdi), %xmm2
-; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps 1472(%rdi), %xmm1
+; AVX-NEXT: vmovaps 1504(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX-NEXT: vmovaps 1536(%rdi), %xmm14
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm14[1]
-; AVX-NEXT: vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps 1472(%rdi), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
+; AVX-NEXT: vmovaps 1536(%rdi), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm2[1]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 288(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[1,1],ymm0[2,2],ymm7[5,5],ymm0[6,6]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm6[1,1],ymm0[2,2],ymm6[5,5],ymm0[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX-NEXT: vmovaps 256(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm13[1],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm10[1],xmm1[2,3]
+; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX-NEXT: vmovaps 384(%rdi), %ymm1
@@ -10183,24 +10168,22 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 736(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm5[1,1],ymm0[2,2],ymm5[5,5],ymm0[6,6]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm11[1,1],ymm0[2,2],ymm11[5,5],ymm0[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX-NEXT: vmovaps 704(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
-; AVX-NEXT: vmovaps %xmm3, %xmm5
-; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm15[1],xmm1[2,3]
+; AVX-NEXT: vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX-NEXT: vmovaps 832(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 800(%rdi), %ymm3
-; AVX-NEXT: vmovups %ymm3, (%rsp) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,0],ymm1[3,3],ymm3[4,4],ymm1[7,7]
+; AVX-NEXT: vmovaps 800(%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,0],ymm1[3,3],ymm2[4,4],ymm1[7,7]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT: # xmm1 = zero,xmm1[1,2],mem[0]
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm13[2]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -10208,10 +10191,10 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm9[1,1],ymm0[2,2],ymm9[5,5],ymm0[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX-NEXT: vmovaps 1152(%rdi), %xmm11
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm11[0],xmm9[1],xmm11[2,3]
-; AVX-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps 1152(%rdi), %xmm1
+; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm11[1],xmm1[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],mem[3,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; AVX-NEXT: vmovaps 1280(%rdi), %ymm1
@@ -10221,8 +10204,8 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,0],ymm1[3,3],ymm2[4,4],ymm1[7,7]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT: # xmm1 = zero,xmm1[1,2],mem[0]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,xmm1[1,2],xmm9[2]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -10251,7 +10234,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 64(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm15[1,1],ymm0[2,2],ymm15[5,5],ymm0[6,6]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm14[1,1],ymm0[2,2],ymm14[5,5],ymm0[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX-NEXT: vmovaps 32(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10273,13 +10256,13 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 512(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm6[1,1],ymm0[2,2],ymm6[5,5],ymm0[6,6]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,1],ymm0[2,2],ymm1[5,5],ymm0[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
; AVX-NEXT: vmovaps 480(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0],xmm15[1],xmm0[2,3]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0],xmm14[1],xmm0[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],mem[3,3]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovaps 608(%rdi), %ymm0
@@ -10296,180 +10279,183 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 960(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[1,1],ymm0[2,2],ymm4[5,5],ymm0[6,6]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[1,1],ymm0[2,2],ymm5[5,5],ymm0[6,6]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,2,3]
; AVX-NEXT: vmovaps 928(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm0[0],xmm12[1],xmm0[2,3]
+; AVX-NEXT: vmovaps %xmm12, %xmm6
+; AVX-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,0],mem[3,3]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7]
; AVX-NEXT: vmovaps 1056(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1024(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm1[2,3],ymm0[0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm1[0,0],ymm4[3,3],ymm1[4,4],ymm4[7,7]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vinsertps {{.*#+}} xmm4 = zero,xmm4[1,2],xmm10[2]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[2,3],ymm0[0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm1[0,0],ymm5[3,3],ymm1[4,4],ymm5[7,7]
+; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
+; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,xmm5[1,2],xmm8[2]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1408(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[1,1],ymm0[2,2],ymm8[5,5],ymm0[6,6]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX-NEXT: vmovaps 1376(%rdi), %xmm0
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm4[1,1],ymm0[2,2],ymm4[5,5],ymm0[6,6]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX-NEXT: vmovaps 1376(%rdi), %xmm4
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm0[0],xmm3[1],xmm0[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm4[0],xmm3[1],xmm4[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[1,0],mem[3,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm4[3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm5[3,4,5,6,7]
; AVX-NEXT: vmovaps 1504(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1472(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm1[2,3],ymm0[0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,0],ymm10[3,3],ymm1[4,4],ymm10[7,7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vinsertps {{.*#+}} xmm10 = zero,xmm10[1,2],xmm14[2]
-; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3,4],ymm10[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm13[2,3,2,3]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm14[1],xmm10[2,3]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm1[2,3],ymm0[0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[0,0],ymm8[3,3],ymm1[4,4],ymm8[7,7]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
+; AVX-NEXT: vinsertps $49, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = zero,xmm8[1,2],mem[0]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm12[0,1,2,3,4],ymm8[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm8[0],mem[1],xmm8[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm8[0],ymm0[2],ymm8[2]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm10[0],ymm0[2],ymm10[2]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm5[2,3,2,3]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm5[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm15[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm8[0],mem[1],xmm8[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
-; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm12[0,1,2],xmm13[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm9[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm11[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm11[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm8[0],mem[1],xmm8[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm13[0],ymm0[0],ymm13[2],ymm0[2]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
-; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm12[0,1,2],xmm9[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm7[2,3,2,3]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm9[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm7[2,3,2,3]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],xmm13[1],xmm8[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,1],ymm12[2,0],ymm2[6,5],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT: # ymm12 = ymm11[0],mem[0],ymm11[2],mem[2]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm15[0],ymm11[0],ymm15[2],ymm11[2]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX-NEXT: # xmm10 = mem[2,3,2,3]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
-; AVX-NEXT: # xmm10 = xmm10[0],mem[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm8[0],mem[1],xmm8[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,1],ymm12[2,0],ymm2[6,5],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT: # ymm12 = ymm2[0],mem[0],ymm2[2],mem[2]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
+; AVX-NEXT: # ymm12 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm15[2,3,2,3]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
-; AVX-NEXT: # xmm10 = xmm10[0],mem[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm14[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm8[0],mem[1],xmm8[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm6[2,1],ymm12[2,0],ymm6[6,5],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT: # ymm12 = ymm2[0],mem[0],ymm2[2],mem[2]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[2,1],ymm12[2,0],ymm0[6,5],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
+; AVX-NEXT: # ymm12 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX-NEXT: # xmm10 = mem[2,3,2,3]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
-; AVX-NEXT: # xmm10 = xmm10[0],mem[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm6[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm8[0],mem[1],xmm8[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vshufps $199, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = ymm12[3,1],mem[0,3],ymm12[7,5],mem[4,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,1],ymm12[2,0],ymm2[6,5],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT: # ymm12 = ymm2[0],mem[0],ymm2[2],mem[2]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm14[2,1],ymm12[2,0],ymm14[6,5],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
+; AVX-NEXT: # ymm12 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm3[2,3,2,3]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps %xmm3, %xmm9
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],xmm4[1],xmm8[2,3]
+; AVX-NEXT: vmovaps %xmm4, %xmm6
+; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[3,1],ymm6[0,3],ymm12[7,5],ymm6[4,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm2[2,1],ymm12[2,0],ymm2[6,5],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[3,1],ymm5[0,3],ymm12[7,5],ymm5[4,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm4[2,1],ymm12[2,0],ymm4[6,5],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
@@ -10477,217 +10463,218 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
; AVX-NEXT: # xmm12 = xmm12[0,1,2],mem[3]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,0],ymm12[0,0],ymm10[5,4],ymm12[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[3,1],ymm10[0,2],ymm12[7,5],ymm10[4,6]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm12 # 16-byte Folded Reload
-; AVX-NEXT: # xmm12 = xmm14[0,1,2],mem[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[1,0],ymm12[0,0],ymm0[5,4],ymm12[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm12[3,1],ymm8[0,2],ymm12[7,5],ymm8[4,6]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX-NEXT: # xmm12 = mem[0,1,2],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm12[0,1],ymm10[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 416(%rdi), %ymm12
-; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm12[0,1],ymm8[1,3],ymm12[4,5],ymm8[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm8[0,2],ymm15[2,0],ymm8[4,6],ymm15[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm15[5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm12[0,1],ymm8[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 416(%rdi), %ymm0
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,1],ymm10[1,3],ymm0[4,5],ymm10[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,2],ymm12[2,0],ymm0[4,6],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm8[1,0],ymm12[0,0],ymm8[5,4],ymm12[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[3,1],ymm10[0,2],ymm12[7,5],ymm10[4,6]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm15 # 16-byte Folded Reload
-; AVX-NEXT: # xmm15 = xmm5[0,1,2],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 864(%rdi), %ymm5
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm5[0,1],ymm1[1,3],ymm5[4,5],ymm1[5,7]
-; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm1[0,2],ymm15[2,0],ymm1[4,6],ymm15[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3,4],ymm15[5,6,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm5[1,0],ymm1[0,0],ymm5[5,4],ymm1[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[3,1],ymm10[0,2],ymm1[7,5],ymm10[4,6]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload
-; AVX-NEXT: # xmm15 = mem[0,1,2],xmm1[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 1312(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm1[0,1],ymm0[1,3],ymm1[4,5],ymm0[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm13[0,2],ymm15[2,0],ymm13[4,6],ymm15[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm15[5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[1,0],ymm12[0,0],ymm0[5,4],ymm12[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm12[3,1],ymm8[0,2],ymm12[7,5],ymm8[4,6]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX-NEXT: # xmm12 = mem[0,1,2],xmm0[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm12[0,1],ymm8[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 864(%rdi), %ymm0
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,1],ymm2[1,3],ymm0[4,5],ymm2[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,2],ymm12[2,0],ymm0[4,6],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[1,0],ymm2[0,0],ymm0[5,4],ymm2[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm2[3,1],ymm8[0,2],ymm2[7,5],ymm8[4,6]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX-NEXT: # xmm12 = mem[0,1,2],xmm0[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm12[0,1],ymm8[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 1312(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,1],ymm1[1,3],ymm0[4,5],ymm1[5,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,2],ymm12[2,0],ymm0[4,6],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3,4],ymm12[5,6,7]
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[3,1],ymm10[0,2],ymm0[7,5],ymm10[4,6]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm15 # 16-byte Folded Reload
-; AVX-NEXT: # xmm15 = xmm9[0,1,2],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[1,0],ymm1[0,0],ymm0[5,4],ymm1[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[3,1],ymm8[0,2],ymm1[7,5],ymm8[4,6]
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm12 # 16-byte Folded Reload
+; AVX-NEXT: # xmm12 = xmm13[0,1,2],mem[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm12[0,1],ymm8[2,3,4,5,6,7]
; AVX-NEXT: vmovaps 1760(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm0[0,1],mem[1,3],ymm0[4,5],mem[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm11[0,2],ymm15[2,0],ymm11[4,6],ymm15[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm15[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,1],ymm11[1,3],ymm0[4,5],ymm11[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm15[0,2],ymm12[2,0],ymm15[4,6],ymm12[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm12[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps %ymm2, %ymm0
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm2[0,0],ymm6[5,4],ymm2[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,1],ymm2[0,2],ymm0[7,5],ymm2[4,6]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm9 # 16-byte Folded Reload
-; AVX-NEXT: # xmm9 = xmm4[0,1,2],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm2[2,3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[1,0],ymm4[0,0],ymm5[5,4],ymm4[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,1],ymm2[0,2],ymm4[7,5],ymm2[4,6]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1,2],xmm9[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3,4,5,6,7]
; AVX-NEXT: vmovaps 1536(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm0[0,1],ymm3[1,3],ymm0[4,5],ymm3[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm7[0,2],ymm9[2,0],ymm7[4,6],ymm9[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm9[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm0[0,1],ymm3[1,3],ymm0[4,5],ymm3[5,7]
+; AVX-NEXT: vmovaps %ymm3, %ymm15
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[0,2],ymm4[2,0],ymm7[4,6],ymm4[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm4[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,1],ymm2[0,2],ymm0[7,5],ymm2[4,6]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm7 # 16-byte Folded Reload
-; AVX-NEXT: # xmm7 = xmm15[0,1,2],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 1088(%rdi), %ymm12
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm12[0,1],ymm8[1,3],ymm12[4,5],ymm8[5,7]
-; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm11[0,2],ymm7[2,0],ymm11[4,6],ymm7[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm7[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm14[0,0],ymm1[5,4],ymm14[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm14[3,1],ymm2[0,2],ymm14[7,5],ymm2[4,6]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = xmm12[0,1,2],mem[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 1088(%rdi), %ymm11
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[0,1],ymm10[1,3],ymm11[4,5],ymm10[5,7]
+; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm14[0,2],ymm4[2,0],ymm14[4,6],ymm4[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm4[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,1],ymm2[0,2],ymm0[7,5],ymm2[4,6]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm7 # 16-byte Folded Reload
-; AVX-NEXT: # xmm7 = xmm14[0,1,2],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3,4,5,6,7]
-; AVX-NEXT: vmovaps 640(%rdi), %ymm4
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm10[1,3],ymm4[4,5],ymm10[5,7]
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = xmm9[0,1,2],mem[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps 640(%rdi), %ymm8
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[0,1],ymm5[1,3],ymm8[4,5],ymm5[5,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm3[0,2],ymm7[2,0],ymm3[4,6],ymm7[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm7[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm3[0,2],ymm4[2,0],ymm3[4,6],ymm4[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm4[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm0[0,0],ymm1[5,4],ymm0[4,4]
; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[3,1],ymm2[0,2],ymm0[7,5],ymm2[4,6]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm7 # 16-byte Folded Reload
-; AVX-NEXT: # xmm7 = xmm5[0,1,2],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[3,2,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm2[2,3,4,5,6,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = xmm6[0,1,2],mem[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,2,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm2[2,3,4,5,6,7]
; AVX-NEXT: vmovaps 192(%rdi), %ymm13
+; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm13[0,1],ymm1[1,3],ymm13[4,5],ymm1[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm13[0,1],ymm1[1,3],ymm13[4,5],ymm1[5,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm2[0,2],ymm9[2,0],ymm2[4,6],ymm9[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3,4],ymm9[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm2[0,2],ymm7[2,0],ymm2[4,6],ymm7[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm7[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm2[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,0],ymm7[0,0],ymm2[7,4],ymm7[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm13[1,0],ymm1[2,0],ymm13[5,4],ymm1[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm7[2,0],ymm0[6,4],ymm7[6,4]
-; AVX-NEXT: vmovaps 64(%rdi), %xmm9
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,0],ymm4[0,0],ymm2[7,4],ymm4[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm13[1,0],ymm1[2,0],ymm13[5,4],ymm1[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm4[2,0],ymm0[6,4],ymm4[6,4]
+; AVX-NEXT: vmovaps 64(%rdi), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 96(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm1[0,1,0,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm9[0,1,2],xmm7[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm5[2,3,2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm1[0,1,0,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm2[0,1,2],xmm4[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm6 = mem[0],xmm6[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = ymm7[1,0],mem[2,0],ymm7[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm5[2,0],ymm0[6,4],ymm5[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm13[1,0],ymm7[2,0],ymm13[5,4],ymm7[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm4[2,0],ymm0[6,4],ymm4[6,4]
; AVX-NEXT: vmovaps 320(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm1[0,1,0,1]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm1[0,1,0,1]
; AVX-NEXT: vmovaps 288(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm1[0,1,2],xmm5[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm1[0,1,2],xmm4[3]
; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; AVX-NEXT: # xmm6 = mem[2,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm6 = mem[0],xmm6[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm3[3,0],ymm0[0,0],ymm3[7,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm4[1,0],ymm10[2,0],ymm4[5,4],ymm10[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,0],ymm5[2,0],ymm8[5,4],ymm5[6,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
; AVX-NEXT: vmovaps 544(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX-NEXT: vmovaps 512(%rdi), %xmm6
; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1,2],xmm1[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm14[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = mem[0],xmm5[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm9[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = mem[0],xmm4[1],mem[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm5[1,0],mem[2,0],ymm5[5,4],mem[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm1[1,0],mem[2,0],ymm1[5,4],mem[6,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
; AVX-NEXT: vmovaps 768(%rdi), %xmm1
-; AVX-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; AVX-NEXT: vmovaps 736(%rdi), %xmm2
-; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; AVX-NEXT: # xmm14 = mem[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm14 = mem[0],xmm14[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3]
+; AVX-NEXT: vmovaps 736(%rdi), %xmm4
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX-NEXT: # xmm9 = mem[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm9 = mem[0],xmm9[1],mem[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm9[0,1],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm11[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm11[3,0],ymm0[0,0],ymm11[7,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,0],ymm8[2,0],ymm12[5,4],ymm8[6,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm14[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm14[3,0],ymm0[0,0],ymm14[7,4],ymm0[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[1,0],ymm10[2,0],ymm11[5,4],ymm10[6,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
; AVX-NEXT: vmovaps 992(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX-NEXT: vmovaps 960(%rdi), %xmm3
; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm15[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm14 = mem[0],xmm14[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm12[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm9 = mem[0],xmm9[1],mem[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm9[0,1],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm8[1,0],mem[2,0],ymm8[5,4],mem[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm11[1,0],mem[2,0],ymm11[5,4],mem[6,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
; AVX-NEXT: vmovaps 1216(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10705,151 +10692,145 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[0,0],ymm1[7,4],ymm0[4,4]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm1[1,0],mem[2,0],ymm1[5,4],mem[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,0],ymm15[2,0],ymm1[5,4],ymm15[6,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,0],ymm0[6,4],ymm1[6,4]
; AVX-NEXT: vmovaps 1440(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm1[0,1,0,1]
; AVX-NEXT: vmovaps 1408(%rdi), %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm1[0,1,2],xmm14[3]
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX-NEXT: # xmm4 = mem[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = mem[0],xmm4[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX-NEXT: # xmm10 = mem[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm10 = mem[0],xmm10[1],mem[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm14[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[3,0],ymm0[0,0],ymm2[7,4],ymm0[4,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm2[1,0],mem[2,0],ymm2[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm0[2,0],ymm4[2,0],ymm0[6,4],ymm4[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm5[1,0],ymm15[2,0],ymm5[5,4],ymm15[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[2,0],ymm10[2,0],ymm0[6,4],ymm10[6,4]
; AVX-NEXT: vmovaps 1664(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm0[0,1,0,1]
; AVX-NEXT: vmovaps 1632(%rdi), %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm0[0,1,2],xmm14[3]
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX-NEXT: # xmm11 = mem[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = mem[0],xmm11[1],mem[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm13[2,1],mem[3,3],ymm13[6,5],mem[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm12[0],xmm10[1],xmm12[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[1,0],ymm4[2,0],ymm11[5,4],ymm4[6,4]
-; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm11 # 16-byte Folded Reload
-; AVX-NEXT: # xmm11 = mem[0,1,2],xmm9[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm9[0,0],ymm15[1,0],ymm9[4,4],ymm15[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm14[0,1],xmm11[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm7[2,1],mem[3,3],ymm7[6,5],mem[7,7]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX-NEXT: # xmm9 = mem[2,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm9 = mem[0],xmm9[1],mem[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],xmm14[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3],ymm10[4,5,6,7]
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm9 # 32-byte Folded Reload
+; AVX-NEXT: # ymm9 = ymm2[2,1],mem[3,3],ymm2[6,5],mem[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm10 # 16-byte Folded Reload
+; AVX-NEXT: # xmm10 = mem[0],xmm2[1],mem[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm10[1,0],ymm9[2,0],ymm10[5,4],ymm9[6,4]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm14[0,1,2],mem[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm2[0,0],ymm12[1,0],ymm2[4,4],ymm12[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[2,0],xmm8[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm13[2,1],ymm7[3,3],ymm13[6,5],ymm7[7,7]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm14[0],xmm9[1],xmm14[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[1,0],ymm4[2,0],ymm11[5,4],ymm4[6,4]
+; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
+; AVX-NEXT: # xmm9 = mem[0],xmm9[1],mem[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm9[1,0],ymm8[2,0],ymm9[5,4],ymm8[6,4]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
; AVX-NEXT: # xmm7 = mem[0,1,2],xmm7[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT: # ymm11 = ymm11[0,0],mem[1,0],ymm11[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm11[0,1],xmm7[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm4[2,1],mem[3,3],ymm4[6,5],mem[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm7 # 16-byte Folded Reload
-; AVX-NEXT: # xmm7 = mem[0],xmm11[1],mem[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[1,0],ymm4[2,0],ymm7[5,4],ymm4[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm10[0,0],ymm13[1,0],ymm10[4,4],ymm13[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm9[2,0],xmm7[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX-NEXT: # ymm7 = ymm7[2,1],mem[3,3],ymm7[6,5],mem[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = mem[0],xmm8[1],mem[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[1,0],ymm7[2,0],ymm8[5,4],ymm7[6,4]
; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
; AVX-NEXT: # xmm6 = mem[0,1,2],xmm6[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX-NEXT: # ymm7 = ymm7[0,0],mem[1,0],ymm7[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm7[0,1],xmm6[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm5[2,1],mem[3,3],ymm5[6,5],mem[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm6 # 16-byte Folded Reload
-; AVX-NEXT: # xmm6 = mem[0],xmm7[1],mem[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm6[1,0],ymm4[2,0],ymm6[5,4],ymm4[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX-NEXT: vblendps $7, (%rsp), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX-NEXT: # xmm5 = mem[0,1,2],xmm5[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm8 # 32-byte Folded Reload
+; AVX-NEXT: # ymm8 = ymm9[0,0],mem[1,0],ymm9[4,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm8[2,0],xmm6[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX-NEXT: # ymm6 = ymm6[0,0],mem[1,0],ymm6[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm6
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm6[0,1],xmm5[3,2]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT: # ymm6 = ymm6[2,1],mem[3,3],ymm6[6,5],mem[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX-NEXT: # xmm7 = mem[0],xmm7[1],mem[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[1,0],ymm6[2,0],ymm7[5,4],ymm6[6,4]
+; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = mem[0,1,2],xmm4[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm7 # 32-byte Folded Reload
+; AVX-NEXT: # ymm7 = ymm8[0,0],mem[1,0],ymm8[4,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm7[2,0],xmm4[3,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
; AVX-NEXT: # ymm4 = ymm4[2,1],mem[3,3],ymm4[6,5],mem[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX-NEXT: # xmm5 = mem[0],xmm5[1],mem[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[1,0],ymm4[2,0],ymm5[5,4],ymm4[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
+; AVX-NEXT: # xmm6 = mem[0],xmm6[1],mem[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm6[1,0],ymm4[2,0],ymm6[5,4],ymm4[6,4]
; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
; AVX-NEXT: # xmm3 = mem[0,1,2],xmm3[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = ymm5[0,0],mem[1,0],ymm5[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm5[0,1],xmm3[3,2]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT: # ymm6 = ymm7[0,0],mem[1,0],ymm7[4,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[2,0],xmm3[3,2]
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT: # ymm3 = ymm8[2,1],mem[3,3],ymm8[6,5],mem[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm4 # 16-byte Folded Reload
-; AVX-NEXT: # xmm4 = mem[0],xmm6[1],mem[2,3]
+; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm11[2,1],mem[3,3],ymm11[6,5],mem[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = mem[0],xmm4[1],mem[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[1,0],ymm3[2,0],ymm4[5,4],ymm3[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX-NEXT: # xmm4 = mem[0,1,2],xmm4[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = ymm5[0,0],mem[1,0],ymm5[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm5[0,1],xmm4[3,2]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = xmm11[0,1,2],mem[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT: # ymm6 = ymm6[0,0],mem[1,0],ymm6[4,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm6[2,0],xmm4[3,2]
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT: # ymm3 = ymm8[2,1],mem[3,3],ymm8[6,5],mem[7,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm4 # 16-byte Folded Reload
-; AVX-NEXT: # xmm4 = xmm5[0],mem[1],xmm5[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm3[2,1],mem[3,3],ymm3[6,5],mem[7,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = mem[0],xmm4[1],mem[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[1,0],ymm3[2,0],ymm4[5,4],ymm3[6,4]
; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
@@ -10857,100 +10838,66 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
; AVX-NEXT: # ymm4 = ymm4[0,0],mem[1,0],ymm4[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm4[0,1],xmm1[3,2]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm4[2,0],xmm1[3,2]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm2[2,1],mem[3,3],ymm2[6,5],mem[7,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm5[2,1],ymm15[3,3],ymm5[6,5],ymm15[7,7]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX-NEXT: vblendps $13, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
; AVX-NEXT: # xmm3 = mem[0],xmm3[1],mem[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[1,0],ymm1[2,0],ymm3[5,4],ymm1[6,4]
-; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX-NEXT: # xmm0 = mem[0,1,2],xmm0[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT: # ymm3 = ymm3[0,0],mem[1,0],ymm3[4,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm15[0,1,2],xmm0[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vshufps $16, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm6[0,0],mem[1,0],ymm6[4,4],mem[5,4]
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm3[0,1],xmm0[3,2]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm3[2,0],xmm0[3,2]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm13[3,0],ymm0[0,0],ymm13[7,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm10[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm12[1],xmm1[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; AVX-NEXT: # xmm1 = mem[0,1,0,1]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT: # xmm1 = xmm1[0,1,2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[1,0],ymm15[2,0],ymm2[5,4],ymm15[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm0[3,0],ymm1[0,0],ymm0[7,4],ymm1[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm9[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm14[1],xmm2[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
-; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX-NEXT: # xmm2 = mem[0,1,0,1]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX-NEXT: # xmm2 = xmm2[0,1,2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT: # ymm3 = ymm0[1,0],mem[2,0],ymm0[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = mem[0,1,0,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm14[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[1,0],ymm12[2,0],ymm2[5,4],ymm12[6,4]
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm0[3,0],ymm1[0,0],ymm0[7,4],ymm1[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm11[2,3,2,3]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,0],ymm3[4,5],ymm1[6,4]
-; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; AVX-NEXT: # xmm3 = mem[0,1,0,1]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm0[1,0],mem[2,0],ymm0[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,0],ymm1[0,0],ymm3[7,4],ymm1[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm7[2,3,2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm3[2,0],xmm0[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[3,0],ymm3[0,0],ymm1[7,4],ymm3[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; AVX-NEXT: # xmm3 = mem[2,3,2,3]
; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
; AVX-NEXT: # xmm3 = xmm3[0],mem[1],xmm3[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,0],ymm3[4,5],ymm1[6,4]
-; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; AVX-NEXT: # xmm3 = mem[0,1,0,1]
-; AVX-NEXT: vblendps $8, (%rsp), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm4[1,0],mem[2,0],ymm4[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,0],ymm3[4,5],ymm2[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = mem[0,1,0,1]
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1,2],mem[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm10[1,0],ymm13[2,0],ymm10[5,4],ymm13[6,4]
+; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm3[2,0],xmm0[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm1[3,0],ymm3[0,0],ymm1[7,4],ymm3[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = mem[0,1,0,1]
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1,2],mem[3]
+; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm9[1,0],mem[2,0],ymm9[5,4],mem[6,4]
+; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm3[2,0],xmm0[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm4[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,0],ymm3[0,0],ymm4[7,4],ymm3[4,4]
@@ -10960,180 +10907,203 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: # xmm4 = xmm4[0],mem[1],xmm4[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; AVX-NEXT: # xmm3 = mem[0,1,0,1]
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX-NEXT: # xmm3 = xmm3[0,1,2],mem[3]
+; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm4 # 32-byte Folded Reload
+; AVX-NEXT: # ymm4 = ymm8[1,0],mem[2,0],ymm8[5,4],mem[6,4]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,0],xmm3[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm5[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[3,0],ymm4[0,0],ymm5[7,4],ymm4[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: # xmm8 = xmm8[0],mem[1],xmm8[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,0],ymm8[4,5],ymm4[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; AVX-NEXT: # xmm4 = mem[0,1,0,1]
; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
; AVX-NEXT: # xmm4 = xmm4[0,1,2],mem[3]
+; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
+; AVX-NEXT: # ymm8 = ymm7[1,0],mem[2,0],ymm7[5,4],mem[6,4]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm8[2,0],xmm4[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm5[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm5[3,0],ymm8[0,0],ymm5[7,4],ymm8[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX-NEXT: # xmm9 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
+; AVX-NEXT: # xmm9 = xmm9[0],mem[1],xmm9[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,0],ymm9[4,5],ymm8[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX-NEXT: # xmm4 = mem[0,1,0,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1,2],xmm11[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm9 # 32-byte Folded Reload
+; AVX-NEXT: # ymm9 = ymm5[1,0],mem[2,0],ymm5[5,4],mem[6,4]
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm9[2,0],xmm4[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm5[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm5[3,0],ymm9[0,0],ymm5[7,4],ymm9[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX-NEXT: # xmm10 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
+; AVX-NEXT: # xmm10 = xmm10[0],mem[1],xmm10[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,0],ymm10[4,5],ymm9[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm9[4,5,6,7]
+; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX-NEXT: # xmm9 = mem[0,1,0,1]
+; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm5 # 16-byte Folded Reload
+; AVX-NEXT: # xmm5 = xmm9[0,1,2],mem[3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm9 # 32-byte Folded Reload
; AVX-NEXT: # ymm9 = ymm7[1,0],mem[2,0],ymm7[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm9[0,1],xmm4[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm9[2,0],xmm5[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm7[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[3,0],ymm4[0,0],ymm7[7,4],ymm4[4,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm6[2,3,2,3]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
-; AVX-NEXT: # xmm9 = xmm9[0],mem[1],xmm9[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2,0],ymm9[4,5],ymm4[6,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm7[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm7[3,0],ymm9[0,0],ymm7[7,4],ymm9[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX-NEXT: # xmm10 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
+; AVX-NEXT: # xmm10 = xmm10[0],mem[1],xmm10[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,0],ymm10[4,5],ymm9[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7]
; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
; AVX-NEXT: # xmm9 = mem[0,1,0,1]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
-; AVX-NEXT: # xmm9 = xmm9[0,1,2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1,2],xmm15[3]
; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm10 # 32-byte Folded Reload
; AVX-NEXT: # ymm10 = ymm6[1,0],mem[2,0],ymm6[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm8[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[3,0],ymm4[0,0],ymm8[7,4],ymm4[4,4]
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX-NEXT: # xmm10 = mem[2,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm5[1],xmm10[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm10[0,1],ymm4[2,0],ymm10[4,5],ymm4[6,4]
-; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX-NEXT: # xmm10 = mem[0,1,0,1]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
-; AVX-NEXT: # xmm10 = xmm10[0,1,2],mem[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm10[2,0],xmm9[2,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT: # ymm11 = ymm6[1,0],mem[2,0],ymm6[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm5[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm5[3,0],ymm10[0,0],ymm5[7,4],ymm10[4,4]
-; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX-NEXT: # xmm11 = mem[2,3,2,3]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
-; AVX-NEXT: # xmm11 = xmm11[0],mem[1],xmm11[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,0],ymm11[4,5],ymm10[6,4]
-; AVX-NEXT: vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX-NEXT: # xmm11 = mem[0,1,0,1]
-; AVX-NEXT: vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
-; AVX-NEXT: # xmm11 = xmm11[0,1,2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT: # ymm12 = ymm5[1,0],mem[2,0],ymm5[5,4],mem[6,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 192(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 128(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 64(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, (%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm11, 224(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm11, 160(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm11, 96(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm11, 32(%rsi)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 192(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 128(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 64(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, (%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 224(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 160(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 96(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 32(%rdx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 192(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 128(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 64(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, (%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 224(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 160(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 96(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 32(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, (%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 64(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 128(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 192(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 224(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 160(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 96(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 32(%r8)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 224(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 192(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 160(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 128(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 96(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 64(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 32(%r9)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, (%r9)
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm6[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm6[3,0],ymm10[0,0],ymm6[7,4],ymm10[4,4]
+; AVX-NEXT: vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
+; AVX-NEXT: # xmm12 = mem[2,3,2,3]
+; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
+; AVX-NEXT: # xmm12 = xmm12[0],mem[1],xmm12[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm12
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[0,1],ymm10[2,0],ymm12[4,5],ymm10[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 192(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 128(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 64(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, (%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 224(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm10, 160(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm10, 96(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm10, 32(%rsi)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 192(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 128(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 64(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, (%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 224(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 160(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 96(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 32(%rdx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 192(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 128(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 64(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, (%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 224(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 160(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 96(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 32(%rcx)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, (%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 64(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 128(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 192(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 224(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 160(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 96(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 32(%r8)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 224(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 192(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 160(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 128(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 96(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 64(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 32(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, (%r9)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 224(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 192(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 160(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 128(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 96(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 64(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, 32(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm5, (%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 224(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 192(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 160(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 128(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 96(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 64(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, 32(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm6, (%rax)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm10, 224(%rax)
-; AVX-NEXT: vmovaps %ymm4, 192(%rax)
-; AVX-NEXT: vmovaps %ymm9, 160(%rax)
-; AVX-NEXT: vmovaps %ymm3, 128(%rax)
-; AVX-NEXT: vmovaps %ymm1, 96(%rax)
+; AVX-NEXT: vmovaps %ymm9, 224(%rax)
+; AVX-NEXT: vmovaps %ymm5, 192(%rax)
+; AVX-NEXT: vmovaps %ymm4, 160(%rax)
+; AVX-NEXT: vmovaps %ymm8, 128(%rax)
+; AVX-NEXT: vmovaps %ymm3, 96(%rax)
; AVX-NEXT: vmovaps %ymm0, 64(%rax)
-; AVX-NEXT: vmovaps %ymm2, 32(%rax)
-; AVX-NEXT: vmovaps %ymm13, (%rax)
+; AVX-NEXT: vmovaps %ymm1, 32(%rax)
+; AVX-NEXT: vmovaps %ymm2, (%rax)
; AVX-NEXT: addq $3176, %rsp # imm = 0xC68
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
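
A common shape across the hunks above: the wide in-lane shuffle (a ymm vshufps/vpermilps with an immediate like [2,0,2,3,6,4,6,7]) no longer runs on the full 256-bit register; the 128-bit extract happens first and the [2,0,...] shuffle runs on the xmm half instead, so only the half that is actually consumed gets shuffled. A rough IR-level sketch of that pattern is below; it is a hypothetical reduced case, not one of the tests touched by this commit, and earlier generic shufflevector combines may merge the two shuffles before the backend fold ever sees them.

    ; Hypothetical reduced case (not a test from this commit): an in-lane
    ; ymm shuffle whose only use is a 128-bit subvector extract.
    define <4 x float> @extract_of_wide_shuffle(<8 x float> %v) {
      ; per-128-bit-lane mask [2,0,2,3], matching the immediate on the
      ; removed ymm vshufps/vpermilps lines in the hunks above
      %s = shufflevector <8 x float> %v, <8 x float> poison,
                         <8 x i32> <i32 2, i32 0, i32 2, i32 3, i32 6, i32 4, i32 6, i32 7>
      ; extract the upper 128-bit half; after the fold the shuffle is
      ; expected to run on the extracted xmm rather than the full ymm
      %h = shufflevector <8 x float> %s, <8 x float> poison,
                         <4 x i32> <i32 4, i32 5, i32 6, i32 7>
      ret <4 x float> %h
    }
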
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
index cd0891385faffe..f0c95f4fa9ef8c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
@@ -61,13 +61,13 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vunpcklps {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,0,2,3]
; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0,2,3]
; AVX-NEXT: vmovq %xmm4, (%rsi)
; AVX-NEXT: vmovq %xmm5, (%rdx)
; AVX-NEXT: vmovq %xmm2, (%rcx)
@@ -514,50 +514,46 @@ define void @load_i32_stride8_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vmovaps 32(%rdi), %ymm1
; AVX-NEXT: vmovaps 64(%rdi), %ymm2
; AVX-NEXT: vmovaps 96(%rdi), %ymm3
-; AVX-NEXT: vmovaps 32(%rdi), %xmm5
-; AVX-NEXT: vmovaps (%rdi), %xmm6
-; AVX-NEXT: vunpcklps {{.*#+}} xmm4 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX-NEXT: vmovaps 32(%rdi), %xmm4
+; AVX-NEXT: vmovaps (%rdi), %xmm5
+; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX-NEXT: vmovaps 96(%rdi), %xmm7
; AVX-NEXT: vmovaps 64(%rdi), %xmm8
; AVX-NEXT: vunpcklps {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm9[0]
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm6[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm5[1],xmm10[2,3]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm9[0]
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm5[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm7[2,2,2,2]
; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm8[0,1,2],xmm10[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm5 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm5[0,1],xmm10[2,3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm4[0,1],xmm10[2,3]
; AVX-NEXT: vunpckhps {{.*#+}} xmm7 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm5[1],xmm7[1]
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm7[1]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm8[0,1],xmm7[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm8[0,1],xmm7[2,0]
; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[2,0],xmm8[2,3]
; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
; AVX-NEXT: vunpckhps {{.*#+}} ymm11 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,0]
; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
-; AVX-NEXT: vmovaps %xmm4, (%rsi)
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; AVX-NEXT: vmovaps %xmm6, (%rsi)
; AVX-NEXT: vmovaps %xmm9, (%rdx)
-; AVX-NEXT: vmovaps %xmm6, (%rcx)
-; AVX-NEXT: vmovaps %xmm5, (%r8)
+; AVX-NEXT: vmovaps %xmm5, (%rcx)
+; AVX-NEXT: vmovaps %xmm4, (%r8)
; AVX-NEXT: vmovaps %xmm7, (%r9)
; AVX-NEXT: vmovaps %xmm8, (%r11)
; AVX-NEXT: vmovaps %xmm10, (%r10)
@@ -1189,11 +1185,10 @@ define void @load_i32_stride8_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vunpcklps {{.*#+}} ymm12 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[4],ymm6[4],ymm7[5],ymm6[5]
; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm12[0,1],ymm8[2,0],ymm12[4,5],ymm8[6,4]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
; AVX-NEXT: vunpcklps {{.*#+}} ymm13 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm13[0,1],xmm12[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm13[0,1],xmm12[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm8[4,5,6,7]
; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[4],ymm10[4],ymm11[5],ymm10[5]
; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm6[1,0],ymm7[1,0],ymm6[5,4],ymm7[5,4]
@@ -1201,19 +1196,17 @@ define void @load_i32_stride8_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm15[0,1],xmm13[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm15[2,0],xmm13[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1,2,3],ymm8[4,5,6,7]
; AVX-NEXT: vunpckhpd {{.*#+}} ymm13 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[6],ymm6[6],ymm7[7],ymm6[7]
; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,0],ymm15[4,5],ymm13[6,4]
; AVX-NEXT: vunpckhpd {{.*#+}} ymm15 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm15[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1],xmm15[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm13[4,5,6,7]
; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[6],ymm10[6],ymm11[7],ymm10[7]
; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm7[3,0],ymm6[7,4],ymm7[7,4]
@@ -1221,9 +1214,8 @@ define void @load_i32_stride8_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, (%rsi)
@@ -2302,34 +2294,35 @@ define void @load_i32_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX-LABEL: load_i32_stride8_vf16:
; AVX: # %bb.0:
-; AVX-NEXT: subq $616, %rsp # imm = 0x268
-; AVX-NEXT: vmovaps 32(%rdi), %xmm14
-; AVX-NEXT: vmovaps (%rdi), %xmm9
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm9[0],xmm14[0],xmm9[1],xmm14[1]
+; AVX-NEXT: subq $584, %rsp # imm = 0x248
+; AVX-NEXT: vmovaps 32(%rdi), %xmm0
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rdi), %xmm12
+; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
; AVX-NEXT: vmovaps 96(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 64(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm8[0]
-; AVX-NEXT: vmovaps 160(%rdi), %xmm5
+; AVX-NEXT: vunpcklps {{.*#+}} xmm9 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm9[0]
+; AVX-NEXT: vmovaps 160(%rdi), %xmm8
; AVX-NEXT: vmovaps 128(%rdi), %xmm10
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm6
; AVX-NEXT: vmovaps 224(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 192(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm4[0,1,0,1]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm5[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm6[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 416(%rdi), %xmm11
-; AVX-NEXT: vmovaps 384(%rdi), %xmm12
-; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX-NEXT: vmovaps 384(%rdi), %xmm13
+; AVX-NEXT: vunpcklps {{.*#+}} xmm4 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
; AVX-NEXT: vmovaps 480(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps 448(%rdi), %xmm1
@@ -2337,87 +2330,88 @@ define void @load_i32_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm3[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm6[6,7]
-; AVX-NEXT: vmovaps 288(%rdi), %xmm13
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm6[6,7]
+; AVX-NEXT: vmovaps 288(%rdi), %xmm14
; AVX-NEXT: vmovaps 256(%rdi), %xmm15
-; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm15[0],xmm13[0],xmm15[1],xmm13[1]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
; AVX-NEXT: vmovaps 352(%rdi), %xmm7
; AVX-NEXT: vmovaps 320(%rdi), %xmm6
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm9[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm8[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm2
-; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm10[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm12[1,1,1,1]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm9[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm2
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm10[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm2[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm13[1],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm1
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm12[1,1,1,1]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm13[1,1,1,1]
; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm11[1],xmm2[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm9[2],xmm14[2],xmm9[3],xmm14[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm10[2],xmm5[2],xmm10[3],xmm5[3]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm14[2,2,2,2]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm10[0,1,2],xmm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm12[2],xmm4[2],xmm12[3],xmm4[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm0[2,2,2,2]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm12[0,1,2],xmm2[3]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm3
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm10[2,2,2,2]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm9[2,2,2,2]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm8[0,1,2],xmm3[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm9[0,1,2],xmm3[3]
; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm1[0,1],xmm3[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm15[2],xmm13[2],xmm15[3],xmm13[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX-NEXT: vmovaps (%rsp), %xmm15 # 16-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm15[2,2,2,2]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX-NEXT: vmovaps (%rsp), %xmm14 # 16-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm14[2,2,2,2]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm11[0,1,2],xmm4[3]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm5
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm7[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm2[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm10[2],xmm14[2],xmm10[3],xmm14[3]
-; AVX-NEXT: vmovaps 288(%rdi), %ymm5
+; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm11[0,1,2],xmm5[3]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm8
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm5[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm7[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm6[0,1,2],xmm8[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm2[0,1],xmm8[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm5 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm5[1]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm5 = xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; AVX-NEXT: vmovaps 320(%rdi), %ymm8
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm4[2,3,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7]
-; AVX-NEXT: vmovaps 256(%rdi), %ymm4
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
+; AVX-NEXT: vmovaps 352(%rdi), %ymm5
+; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 416(%rdi), %ymm8
-; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 416(%rdi), %ymm4
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; AVX-NEXT: vmovaps 384(%rdi), %ymm6
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm0[1]
; AVX-NEXT: vmovaps 448(%rdi), %ymm7
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm11[2],xmm15[2],xmm11[3],xmm15[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm11[2],xmm14[2],xmm11[3],xmm14[3]
; AVX-NEXT: vmovaps 480(%rdi), %ymm9
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm3[2,3,2,3]
@@ -2426,124 +2420,116 @@ define void @load_i32_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm9[0],ymm7[0],ymm9[2],ymm7[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm6[0],ymm8[0],ymm6[1],ymm8[1],ymm6[4],ymm8[4],ymm6[5],ymm8[5]
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps %ymm9, %ymm3
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[4],ymm4[4],ymm6[5],ymm4[5]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm8[0],ymm5[2],ymm8[2]
+; AVX-NEXT: vmovaps %ymm8, %ymm6
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 320(%rdi), %ymm10
-; AVX-NEXT: vmovaps 352(%rdi), %ymm11
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
-; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovaps 288(%rdi), %ymm7
+; AVX-NEXT: vmovaps 256(%rdi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm9[0],ymm7[0],ymm9[1],ymm7[1],ymm9[4],ymm7[4],ymm9[5],ymm7[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovaps 160(%rdi), %ymm7
-; AVX-NEXT: vmovaps 128(%rdi), %ymm5
-; AVX-NEXT: vmovaps 192(%rdi), %ymm1
+; AVX-NEXT: vmovaps 160(%rdi), %ymm11
+; AVX-NEXT: vmovaps 128(%rdi), %ymm12
+; AVX-NEXT: vmovaps 192(%rdi), %ymm10
+; AVX-NEXT: vmovaps 224(%rdi), %ymm13
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm13[0],ymm10[0],ymm13[2],ymm10[2]
+; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[4],ymm11[4],ymm12[5],ymm11[5]
+; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX-NEXT: vmovaps 64(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 224(%rdi), %ymm0
+; AVX-NEXT: vmovaps 96(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 64(%rdi), %ymm0
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%rdi), %ymm14
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[2],ymm0[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vmovaps (%rdi), %ymm13
-; AVX-NEXT: vmovaps 32(%rdi), %ymm12
-; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[4],ymm12[4],ymm13[5],ymm12[5]
-; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm15[0,1],xmm0[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX-NEXT: vmovaps (%rdi), %ymm1
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 32(%rdi), %ymm15
+; AVX-NEXT: vunpcklps {{.*#+}} ymm14 = ymm1[0],ymm15[0],ymm1[1],ymm15[1],ymm1[4],ymm15[4],ymm1[5],ymm15[5]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm14[0,1],xmm0[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm4[0],ymm9[0],ymm4[1],ymm9[1],ymm4[4],ymm9[4],ymm4[5],ymm9[5]
-; AVX-NEXT: vmovaps %ymm9, %ymm8
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[1,0],ymm6[1,0],ymm9[5,4],ymm6[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm0[1,0],ymm1[1,0],ymm0[5,4],ymm1[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5]
+; AVX-NEXT: vmovaps %ymm3, %ymm8
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm3[0],ymm6[0],ymm3[1],ymm6[1],ymm3[4],ymm6[4],ymm3[5],ymm6[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm7[1,0],ymm5[1,0],ymm7[5,4],ymm5[5,4]
-; AVX-NEXT: vmovaps %ymm7, %ymm11
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm2[2,0],ymm15[2,3],ymm2[6,4],ymm15[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm5[0],ymm14[0],ymm5[1],ymm14[1],ymm5[4],ymm14[4],ymm5[5],ymm14[5]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm12[1,0],ymm13[1,0],ymm12[5,4],ymm13[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm8[1],ymm4[1],ymm8[3],ymm4[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm4[2],ymm9[2],ymm4[3],ymm9[3],ymm4[6],ymm9[6],ymm4[7],ymm9[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm2[0,1],ymm7[2,0],ymm2[4,5],ymm7[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm0[1,0],ymm3[1,0],ymm0[5,4],ymm3[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm4[2,0],ymm2[2,3],ymm4[6,4],ymm2[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm6[0],ymm2[0],ymm6[1],ymm2[1],ymm6[4],ymm2[4],ymm6[5],ymm2[5]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm7[1,0],ymm9[1,0],ymm7[5,4],ymm9[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm14[2,0],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm10[0],ymm13[0],ymm10[1],ymm13[1],ymm10[4],ymm13[4],ymm10[5],ymm13[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[1,0],ymm12[1,0],ymm11[5,4],ymm12[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm4[2,0],ymm5[2,3],ymm4[6,4],ymm5[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[4],ymm12[4],ymm10[5],ymm12[5]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm15[1,0],ymm11[1,0],ymm15[5,4],ymm11[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm14[2,0],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm8[1],ymm1[1],ymm8[3],ymm1[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[6],ymm0[6],ymm3[7],ymm0[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,1],ymm5[2,0],ymm4[4,5],ymm5[6,4]
+; AVX-NEXT: vmovaps %ymm6, %ymm3
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm2[1],ymm6[1],ymm2[3],ymm6[3]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vmovaps %ymm7, %ymm2
+; AVX-NEXT: vunpckhps {{.*#+}} ymm14 = ymm9[2],ymm7[2],ymm9[3],ymm7[3],ymm9[6],ymm7[6],ymm9[7],ymm7[7]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm14[0,1],xmm4[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm4[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm15 = ymm7[1],ymm10[1],ymm7[3],ymm10[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm15[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm6[1],ymm3[1],ymm6[3],ymm3[3]
-; AVX-NEXT: vmovaps %ymm3, %ymm9
-; AVX-NEXT: vmovaps %ymm11, %ymm1
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm3[2],ymm11[2],ymm3[3],ymm11[3],ymm3[6],ymm11[6],ymm3[7],ymm11[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm14[1],ymm5[1],ymm14[3],ymm5[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[6],ymm12[6],ymm13[7],ymm12[7]
-; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm13[1],ymm7[1],ymm13[3],ymm7[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm6[2],ymm1[2],ymm6[3],ymm1[3],ymm6[6],ymm1[6],ymm6[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,0],ymm4[4,5],ymm0[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm12[1],ymm10[1],ymm12[3],ymm10[3]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vunpckhps {{.*#+}} ymm14 = ymm11[2],ymm15[2],ymm11[3],ymm15[3],ymm11[6],ymm15[6],ymm11[7],ymm15[7]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm14[0,1],xmm4[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm4[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm8[2],ymm0[3],ymm8[3],ymm0[6],ymm8[6],ymm0[7],ymm8[7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[3,0],ymm4[3,0],ymm2[7,4],ymm4[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm10[2],ymm7[2],ymm10[3],ymm7[3],ymm10[6],ymm7[6],ymm10[7],ymm7[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm7 # 32-byte Folded Reload
-; AVX-NEXT: # ymm7 = ymm4[3,0],mem[3,0],ymm4[7,4],mem[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm7[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[6],ymm6[6],ymm9[7],ymm6[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm1[3,0],ymm3[3,0],ymm1[7,4],ymm3[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm6[2,0],ymm2[2,3],ymm6[6,4],ymm2[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm14[2],ymm5[3],ymm14[3],ymm5[6],ymm14[6],ymm5[7],ymm14[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm12[3,0],ymm13[3,0],ymm12[7,4],ymm13[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX-NEXT: # ymm4 = ymm4[3,0],mem[3,0],ymm4[7,4],mem[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm4[2,0],ymm0[2,3],ymm4[6,4],ymm0[6,7]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
+; AVX-NEXT: # ymm4 = ymm3[2],mem[2],ymm3[3],mem[3],ymm3[6],mem[6],ymm3[7],mem[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm2[3,0],ymm9[3,0],ymm2[7,4],ymm9[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm9[2,0],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm7[2],ymm13[2],ymm7[3],ymm13[3],ymm7[6],ymm13[6],ymm7[7],ymm13[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm1[3,0],ymm6[3,0],ymm1[7,4],ymm6[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm6[2,0],ymm4[2,3],ymm6[6,4],ymm4[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm3 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm15[3,0],ymm11[3,0],ymm15[7,4],ymm11[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm3, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm2, 32(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
@@ -2570,13 +2556,12 @@ define void @load_i32_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm2, (%rax)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm2, 32(%rax)
-; AVX-NEXT: vmovaps %ymm11, (%rax)
+; AVX-NEXT: vmovaps %ymm5, 32(%rax)
+; AVX-NEXT: vmovaps %ymm14, (%rax)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: vmovaps %ymm0, 32(%rax)
; AVX-NEXT: vmovaps %ymm1, (%rax)
-; AVX-NEXT: addq $616, %rsp # imm = 0x268
+; AVX-NEXT: addq $584, %rsp # imm = 0x248
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
@@ -4892,7 +4877,7 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX-LABEL: load_i32_stride8_vf32:
; AVX: # %bb.0:
-; AVX-NEXT: subq $1800, %rsp # imm = 0x708
+; AVX-NEXT: subq $1768, %rsp # imm = 0x6E8
; AVX-NEXT: vmovaps 288(%rdi), %xmm14
; AVX-NEXT: vmovaps 256(%rdi), %xmm10
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm10[0],xmm14[0],xmm10[1],xmm14[1]
@@ -5175,84 +5160,81 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 416(%rdi), %ymm13
-; AVX-NEXT: vmovaps 384(%rdi), %ymm5
-; AVX-NEXT: vmovaps 448(%rdi), %ymm9
-; AVX-NEXT: vmovaps 480(%rdi), %ymm12
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm12[0],ymm9[0],ymm12[2],ymm9[2]
+; AVX-NEXT: vmovaps 416(%rdi), %ymm12
+; AVX-NEXT: vmovaps 384(%rdi), %ymm9
+; AVX-NEXT: vmovaps 448(%rdi), %ymm7
+; AVX-NEXT: vmovaps 480(%rdi), %ymm11
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm11[0],ymm7[0],ymm11[2],ymm7[2]
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm9[0],ymm12[0],ymm9[1],ymm12[1],ymm9[4],ymm12[4],ymm9[5],ymm12[5]
; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[4],ymm13[4],ymm5[5],ymm13[5]
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 288(%rdi), %ymm2
+; AVX-NEXT: vmovaps 320(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 256(%rdi), %ymm1
+; AVX-NEXT: vmovaps 352(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 320(%rdi), %ymm2
+; AVX-NEXT: vmovaps 288(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 352(%rdi), %ymm11
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm11[0],ymm2[0],ymm11[2],ymm2[2]
-; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovaps 256(%rdi), %ymm10
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm10[0],ymm2[0],ymm10[1],ymm2[1],ymm10[4],ymm2[4],ymm10[5],ymm2[5]
+; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 928(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 928(%rdi), %ymm5
; AVX-NEXT: vmovaps 896(%rdi), %ymm3
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups %ymm3, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovaps 960(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 992(%rdi), %ymm0
-; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5]
+; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 800(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 768(%rdi), %ymm2
+; AVX-NEXT: vmovaps 832(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
+; AVX-NEXT: vmovaps 864(%rdi), %ymm1
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 832(%rdi), %ymm3
+; AVX-NEXT: vmovaps 800(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 864(%rdi), %ymm2
+; AVX-NEXT: vmovaps 768(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 672(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 640(%rdi), %ymm1
+; AVX-NEXT: vmovaps 640(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 704(%rdi), %ymm13
+; AVX-NEXT: vmovaps 736(%rdi), %ymm0
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm13[0],ymm0[2],ymm13[2]
+; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX-NEXT: vmovaps 576(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 704(%rdi), %ymm0
+; AVX-NEXT: vmovaps 608(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 736(%rdi), %ymm10
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm10[0],ymm0[0],ymm10[2],ymm0[2]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX-NEXT: vmovaps 544(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 512(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX-NEXT: vmovaps 576(%rdi), %ymm0
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 608(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 160(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -5262,67 +5244,63 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 224(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,1],ymm3[2,0],ymm6[4,5],ymm3[6,4]
-; AVX-NEXT: vmovaps 64(%rdi), %ymm14
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,1],ymm4[2,0],ymm6[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 64(%rdi), %ymm0
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 96(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm2[0],ymm14[0],ymm2[2],ymm14[2]
-; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm0[0],ymm2[2],ymm0[2]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm0
; AVX-NEXT: vmovaps (%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%rdi), %ymm3
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vmovaps 32(%rdi), %ymm14
+; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm2[0],ymm14[0],ymm2[1],ymm14[1],ymm2[4],ymm14[4],ymm2[5],ymm14[5]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm15[0,1],xmm0[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm15[0,1],xmm0[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps %ymm11, %ymm6
+; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm7[0],ymm11[0],ymm7[1],ymm11[1],ymm7[4],ymm11[4],ymm7[5],ymm11[5]
+; AVX-NEXT: vmovaps %ymm12, %ymm7
; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm9[0],ymm12[0],ymm9[1],ymm12[1],ymm9[4],ymm12[4],ymm9[5],ymm12[5]
-; AVX-NEXT: vmovaps %ymm13, %ymm8
-; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm13[1,0],ymm5[1,0],ymm13[5,4],ymm5[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm12[1,0],ymm9[1,0],ymm12[5,4],ymm9[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm13[0],ymm11[0],ymm13[1],ymm11[1],ymm13[4],ymm11[4],ymm13[5],ymm11[5]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm3[1,0],ymm5[1,0],ymm3[5,4],ymm5[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm9[0],ymm3[1],ymm9[1],ymm3[4],ymm9[4],ymm3[5],ymm9[5]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm11[1,0],ymm10[1,0],ymm11[5,4],ymm10[5,4]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups (%rsp), %ymm11 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm7[0],ymm11[0],ymm7[1],ymm11[1],ymm7[4],ymm11[4],ymm7[5],ymm11[5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[1,0],ymm9[1,0],ymm1[5,4],ymm9[5,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm2[0],ymm10[0],ymm2[1],ymm10[1],ymm2[4],ymm10[4],ymm2[5],ymm10[5]
+; AVX-NEXT: vmovups (%rsp), %ymm12 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm5[1,0],ymm12[1,0],ymm5[5,4],ymm12[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm5[0],mem[0],ymm5[1],mem[1],ymm5[4],mem[4],ymm5[5],mem[5]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm6[1,0],ymm4[1,0],ymm6[5,4],ymm4[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm8[1,0],mem[1,0],ymm8[5,4],mem[5,4]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm10[0],ymm0[1],ymm10[1],ymm0[4],ymm10[4],ymm0[5],ymm10[5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm6[1,0],ymm10[1,0],ymm6[5,4],ymm10[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm13[0],ymm4[0],ymm13[1],ymm4[1],ymm13[4],ymm4[4],ymm13[5],ymm4[5]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm13[1,0],mem[1,0],ymm13[5,4],mem[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
@@ -5330,10 +5308,9 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
; AVX-NEXT: # ymm15 = ymm15[1,0],mem[1,0],ymm15[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
@@ -5342,78 +5319,73 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX-NEXT: # ymm1 = ymm1[1,0],mem[1,0],ymm1[5,4],mem[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
-; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm14[0],mem[0],ymm14[1],mem[1],ymm14[4],mem[4],ymm14[5],mem[5]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm14[1,0],ymm2[1,0],ymm14[5,4],ymm2[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm14[1,0],mem[1,0],ymm14[5,4],mem[5,4]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm12[1],mem[1],ymm12[3],mem[3]
+; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm6[1],mem[1],ymm6[3],mem[3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm8[2],ymm1[3],ymm8[3],ymm1[6],ymm8[6],ymm1[7],ymm8[7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm7[2],ymm1[3],ymm7[3],ymm1[6],ymm7[6],ymm1[7],ymm7[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[6],ymm3[6],ymm5[7],ymm3[7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm9[1],ymm3[1],ymm9[3],ymm3[3]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm15 = ymm12[1],ymm13[1],ymm12[3],ymm13[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm9[2],ymm11[2],ymm9[3],ymm11[3],ymm9[6],ymm11[6],ymm9[7],ymm11[7]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm15[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm11[1],ymm7[1],ymm11[3],ymm7[3]
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm9[2],mem[2],ymm9[3],mem[3],ymm9[6],mem[6],ymm9[7],mem[7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm10[1],ymm2[1],ymm10[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm12[2],mem[2],ymm12[3],mem[3],ymm12[6],mem[6],ymm12[7],mem[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm4[2],mem[2],ymm4[3],mem[3],ymm4[6],mem[6],ymm4[7],mem[7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm5[1],ymm1[3],ymm5[3]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm2[2],ymm8[2],ymm2[3],ymm8[3],ymm2[6],ymm8[6],ymm2[7],ymm8[7]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm15[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm5[1],ymm7[1],ymm5[3],ymm7[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm10[2],ymm6[2],ymm10[3],ymm6[3],ymm10[6],ymm6[6],ymm10[7],ymm6[7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm13[2],ymm4[3],ymm13[3],ymm4[6],ymm13[6],ymm4[7],ymm13[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm8[2],ymm6[3],ymm8[3],ymm6[6],ymm8[6],ymm6[7],ymm8[7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm6[1],ymm2[1],ymm6[3],ymm2[3]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm15 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm7[2],ymm3[2],ymm7[3],ymm3[3],ymm7[6],ymm3[6],ymm7[7],ymm3[7]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm15[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm11[1],ymm0[3],ymm11[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm8[1],ymm0[3],ymm8[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm13[2],ymm10[2],ymm13[3],ymm10[3],ymm13[6],ymm10[6],ymm13[7],ymm10[7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm12[2],ymm10[2],ymm12[3],ymm10[3],ymm12[6],ymm10[6],ymm12[7],ymm10[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm9[1],mem[1],ymm9[3],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm11[1],ymm1[3],ymm11[3]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm2[2],ymm14[2],ymm2[3],ymm14[3],ymm2[6],ymm14[6],ymm2[7],ymm14[7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -5424,57 +5396,52 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: # ymm1 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm1[2],ymm12[2],ymm1[3],ymm12[3],ymm1[6],ymm12[6],ymm1[7],ymm12[7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[3,0],ymm9[3,0],ymm15[7,4],ymm9[7,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm15[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[6],ymm5[6],ymm7[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm15[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm5[2],mem[2],ymm5[3],mem[3],ymm5[6],mem[6],ymm5[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm4[3,0],ymm1[7,4],ymm4[7,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm8[3,0],ymm6[3,0],ymm8[7,4],ymm6[7,4]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm2[2],ymm6[2],ymm2[3],ymm6[3],ymm2[6],ymm6[6],ymm2[7],ymm6[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,0],ymm7[3,0],ymm3[7,4],ymm7[7,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpckhps (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: vshufps $51, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX-NEXT: # ymm2 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[2,3],ymm2[6,4],ymm1[6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX-NEXT: # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
-; AVX-NEXT: # ymm4 = ymm3[3,0],mem[3,0],ymm3[7,4],mem[7,4]
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm3[3,0],mem[3,0],ymm3[7,4],mem[7,4]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
+; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm3[2,0],xmm2[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm2 # 32-byte Folded Reload
-; AVX-NEXT: # ymm2 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm10[3,0],ymm13[3,0],ymm10[7,4],ymm13[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[2,0],ymm2[2,3],ymm4[6,4],ymm2[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm3[2],ymm9[2],ymm3[3],ymm9[3],ymm3[6],ymm9[6],ymm3[7],ymm9[7]
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT: # ymm3 = ymm14[3,0],mem[3,0],ymm14[7,4],mem[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm8[2],mem[2],ymm8[3],mem[3],ymm8[6],mem[6],ymm8[7],mem[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm10[3,0],ymm12[3,0],ymm10[7,4],ymm12[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm3[2,0],ymm2[2,3],ymm3[6,4],ymm2[6,7]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm14[3,0],ymm13[3,0],ymm14[7,4],ymm13[7,4]
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,0],xmm3[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm3, 64(%rsi)
@@ -5537,9 +5504,9 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: vmovaps %ymm1, 96(%rax)
; AVX-NEXT: vmovaps %ymm0, 64(%rax)
-; AVX-NEXT: vmovaps %ymm12, 32(%rax)
+; AVX-NEXT: vmovaps %ymm9, 32(%rax)
; AVX-NEXT: vmovaps %ymm2, (%rax)
-; AVX-NEXT: addq $1800, %rsp # imm = 0x708
+; AVX-NEXT: addq $1768, %rsp # imm = 0x6E8
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
@@ -10431,7 +10398,7 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovaps 32(%rdi), %xmm1
-; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm1, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps (%rdi), %xmm4
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10474,7 +10441,7 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps 1184(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 1152(%rdi), %xmm6
-; AVX-NEXT: vmovaps %xmm6, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm7
; AVX-NEXT: vmovaps 1248(%rdi), %xmm0
@@ -10584,7 +10551,7 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm4[1,1,1,1]
-; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: vblendps $2, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[1],xmm0[2,3]
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3]
; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 16-byte Folded Reload
@@ -10616,7 +10583,7 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[2,3]
; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; AVX-NEXT: vpermilps $85, (%rsp), %xmm2 # 16-byte Folded Reload
+; AVX-NEXT: vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; AVX-NEXT: # xmm2 = mem[1,1,1,1]
; AVX-NEXT: vblendps $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
; AVX-NEXT: # xmm2 = xmm2[0],mem[1],xmm2[2,3]
@@ -10719,9 +10686,9 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm8 # 16-byte Folded Reload
+; AVX-NEXT: vunpckhps (%rsp), %xmm0, %xmm8 # 16-byte Folded Reload
; AVX-NEXT: # xmm8 = xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm8, (%rsp) # 16-byte Spill
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm1 # 16-byte Folded Reload
; AVX-NEXT: # xmm1 = xmm10[2],mem[2],xmm10[3],mem[3]
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10765,7 +10732,7 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm8[2],mem[2],xmm8[3],mem[3]
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
; AVX-NEXT: # xmm8 = xmm8[2],mem[2],xmm8[3],mem[3]
; AVX-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10786,7 +10753,7 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm8[2],mem[2],xmm8[3],mem[3]
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm8 # 16-byte Folded Reload
; AVX-NEXT: # xmm8 = xmm9[2],mem[2],xmm9[3],mem[3]
; AVX-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10861,7 +10828,7 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm4 # 16-byte Folded Reload
; AVX-NEXT: # xmm4 = xmm3[2],mem[2],xmm3[3],mem[3]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX-NEXT: vmovaps (%rsp), %xmm2 # 16-byte Reload
; AVX-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm4 # 16-byte Folded Reload
@@ -10901,7 +10868,7 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
-; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm1 # 16-byte Folded Reload
; AVX-NEXT: # xmm1 = xmm13[2],mem[2],xmm13[3],mem[3]
@@ -10914,50 +10881,46 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 416(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 384(%rdi), %ymm15
+; AVX-NEXT: vmovaps 384(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 448(%rdi), %ymm4
; AVX-NEXT: vmovaps 480(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm15[0],ymm2[0],ymm15[1],ymm2[1],ymm15[4],ymm2[4],ymm15[5],ymm2[5]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 288(%rdi), %ymm6
-; AVX-NEXT: vmovaps 256(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[1],ymm6[1],ymm1[4],ymm6[4],ymm1[5],ymm6[5]
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 320(%rdi), %ymm6
+; AVX-NEXT: vmovaps 352(%rdi), %ymm13
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm6[0],ymm13[2],ymm6[2]
+; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 320(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 352(%rdi), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm9[0],ymm2[0],ymm9[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovaps 288(%rdi), %ymm8
+; AVX-NEXT: vmovaps 256(%rdi), %ymm7
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[4],ymm8[4],ymm7[5],ymm8[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 672(%rdi), %ymm12
-; AVX-NEXT: vmovaps 640(%rdi), %ymm8
-; AVX-NEXT: vmovaps 704(%rdi), %ymm13
-; AVX-NEXT: vmovaps 736(%rdi), %ymm0
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm13[0],ymm0[2],ymm13[2]
-; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm8[0],ymm12[0],ymm8[1],ymm12[1],ymm8[4],ymm12[4],ymm8[5],ymm12[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 544(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 512(%rdi), %ymm2
+; AVX-NEXT: vmovaps 672(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
+; AVX-NEXT: vmovaps 640(%rdi), %ymm9
+; AVX-NEXT: vmovaps 704(%rdi), %ymm12
+; AVX-NEXT: vmovaps 736(%rdi), %ymm10
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm10[0],ymm12[0],ymm10[2],ymm12[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm9[0],ymm2[0],ymm9[1],ymm2[1],ymm9[4],ymm2[4],ymm9[5],ymm2[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX-NEXT: vmovaps 576(%rdi), %ymm14
+; AVX-NEXT: vmovaps 608(%rdi), %ymm11
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm11[0],ymm14[0],ymm11[2],ymm14[2]
+; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 576(%rdi), %ymm11
-; AVX-NEXT: vmovaps 608(%rdi), %ymm7
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm7[0],ymm11[0],ymm7[2],ymm11[2]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovaps 544(%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 512(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 928(%rdi), %ymm2
@@ -10971,20 +10934,19 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 800(%rdi), %ymm2
+; AVX-NEXT: vmovaps 832(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 768(%rdi), %ymm1
+; AVX-NEXT: vmovaps 864(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 832(%rdi), %ymm3
+; AVX-NEXT: vmovaps 800(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 864(%rdi), %ymm2
+; AVX-NEXT: vmovaps 768(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1184(%rdi), %ymm2
@@ -10998,20 +10960,19 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2]
; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 1056(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1024(%rdi), %ymm2
+; AVX-NEXT: vmovaps 1088(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
+; AVX-NEXT: vmovaps 1120(%rdi), %ymm1
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 1088(%rdi), %ymm3
+; AVX-NEXT: vmovaps 1056(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1120(%rdi), %ymm2
+; AVX-NEXT: vmovaps 1024(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1440(%rdi), %ymm2
@@ -11025,20 +10986,19 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 1312(%rdi), %ymm2
+; AVX-NEXT: vmovaps 1344(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1280(%rdi), %ymm1
+; AVX-NEXT: vmovaps 1376(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 1344(%rdi), %ymm3
+; AVX-NEXT: vmovaps 1312(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1376(%rdi), %ymm2
+; AVX-NEXT: vmovaps 1280(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1696(%rdi), %ymm2
@@ -11052,20 +11012,19 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 1568(%rdi), %ymm2
+; AVX-NEXT: vmovaps 1600(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1536(%rdi), %ymm1
+; AVX-NEXT: vmovaps 1632(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 1600(%rdi), %ymm3
+; AVX-NEXT: vmovaps 1568(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1632(%rdi), %ymm2
+; AVX-NEXT: vmovaps 1536(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 1952(%rdi), %ymm2
@@ -11079,20 +11038,19 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 1824(%rdi), %ymm2
+; AVX-NEXT: vmovaps 1856(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1792(%rdi), %ymm1
+; AVX-NEXT: vmovaps 1888(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vmovaps 1856(%rdi), %ymm3
+; AVX-NEXT: vmovaps 1824(%rdi), %ymm3
; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1888(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovaps 1792(%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 160(%rdi), %ymm2
@@ -11102,449 +11060,423 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps 192(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 224(%rdi), %ymm0
-; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vmovaps 64(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%rdi), %ymm0
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX-NEXT: vmovaps 64(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vmovaps 96(%rdi), %ymm1
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm14 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm14[0,1],xmm0[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
+; AVX-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm15[0,1],xmm0[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm4, %ymm5
; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5]
-; AVX-NEXT: vmovaps %ymm15, %ymm4
-; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[1,0],ymm15[1,0],ymm1[5,4],ymm15[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovaps %ymm9, %ymm15
-; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm9[0],ymm15[0],ymm9[1],ymm15[1],ymm9[4],ymm15[4],ymm9[5],ymm15[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm6[1,0],ymm2[1,0],ymm6[5,4],ymm2[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm13[0],ymm6[0],ymm13[1],ymm6[1],ymm13[4],ymm6[4],ymm13[5],ymm6[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[1,0],ymm4[1,0],ymm1[5,4],ymm4[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm6[0],ymm13[0],ymm6[1],ymm13[1],ymm6[4],ymm13[4],ymm6[5],ymm13[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm8[1,0],ymm7[1,0],ymm8[5,4],ymm7[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps %ymm12, %ymm13
; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[1,0],ymm8[1,0],ymm12[5,4],ymm8[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovaps %ymm11, %ymm13
+; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm12[0],ymm10[0],ymm12[1],ymm10[1],ymm12[4],ymm10[4],ymm12[5],ymm10[5]
+; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm12[1,0],ymm9[1,0],ymm12[5,4],ymm9[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm11[0],ymm7[0],ymm11[1],ymm7[1],ymm11[4],ymm7[4],ymm11[5],ymm7[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm7[1,0],ymm11[1,0],ymm7[5,4],ymm11[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm14[0],ymm11[0],ymm14[1],ymm11[1],ymm14[4],ymm11[4],ymm14[5],ymm11[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[1,0],ymm14[1,0],ymm15[5,4],ymm14[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[1,0],ymm7[1,0],ymm10[5,4],ymm7[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[0],mem[0],ymm10[1],mem[1],ymm10[4],mem[4],ymm10[5],mem[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1,0],mem[1,0],ymm14[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1,0],mem[1,0],ymm2[5,4],mem[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[0],mem[0],ymm2[1],mem[1],ymm2[4],mem[4],ymm2[5],mem[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[1,0],mem[1,0],ymm15[5,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[1,0],mem[1,0],ymm10[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[0],mem[0],ymm10[1],mem[1],ymm10[4],mem[4],ymm10[5],mem[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1,0],mem[1,0],ymm14[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1,0],mem[1,0],ymm2[5,4],mem[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[0],mem[0],ymm2[1],mem[1],ymm2[4],mem[4],ymm2[5],mem[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[1,0],mem[1,0],ymm15[5,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[1,0],mem[1,0],ymm10[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[0],mem[0],ymm10[1],mem[1],ymm10[4],mem[4],ymm10[5],mem[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1,0],mem[1,0],ymm14[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1,0],mem[1,0],ymm2[5,4],mem[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[0],mem[0],ymm2[1],mem[1],ymm2[4],mem[4],ymm2[5],mem[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[1,0],mem[1,0],ymm15[5,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[1,0],mem[1,0],ymm10[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[0],mem[0],ymm10[1],mem[1],ymm10[4],mem[4],ymm10[5],mem[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1,0],mem[1,0],ymm14[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1,0],mem[1,0],ymm2[5,4],mem[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[0],mem[0],ymm2[1],mem[1],ymm2[4],mem[4],ymm2[5],mem[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[1,0],mem[1,0],ymm15[5,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[1,0],mem[1,0],ymm10[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[0],mem[0],ymm10[1],mem[1],ymm10[4],mem[4],ymm10[5],mem[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1,0],mem[1,0],ymm14[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1,0],mem[1,0],ymm2[5,4],mem[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[0],mem[0],ymm2[1],mem[1],ymm2[4],mem[4],ymm2[5],mem[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps $17, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[1,0],mem[1,0],ymm15[5,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vunpcklps (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[1,0],mem[1,0],ymm10[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[0],mem[0],ymm10[1],mem[1],ymm10[4],mem[4],ymm10[5],mem[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1,0],mem[1,0],ymm14[5,4],mem[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1,0],mem[1,0],ymm2[5,4],mem[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[0],mem[0],ymm2[1],mem[1],ymm2[4],mem[4],ymm2[5],mem[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps $17, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[1,0],mem[1,0],ymm15[5,4],mem[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,0],ymm10[4,5],ymm0[6,4]
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm14 = ymm15[1],ymm9[1],ymm15[3],ymm9[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm4[1],ymm6[1],ymm4[3],ymm6[3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm6[1],mem[1],ymm6[3],mem[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm12[2],ymm8[3],ymm12[3],ymm8[6],ymm12[6],ymm8[7],ymm12[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,0],ymm10[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[6],ymm12[6],ymm11[7],ymm12[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm10[1],ymm13[1],ymm10[3],ymm13[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm9[2],ymm12[2],ymm9[3],ymm12[3],ymm9[6],ymm12[6],ymm9[7],ymm12[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm11[1],mem[1],ymm11[3],mem[3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm14 = ymm1[1],ymm13[1],ymm1[3],ymm13[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm14[2],ymm1[2],ymm14[3],ymm1[3],ymm14[6],ymm1[6],ymm14[7],ymm1[7]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[6],ymm5[6],ymm7[7],ymm5[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,0],ymm10[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[6],ymm11[6],ymm13[7],ymm11[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm11[1],ymm5[1],ymm11[3],ymm5[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm14 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[6],ymm3[6],ymm9[7],ymm3[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm8[1],mem[1],ymm8[3],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm8[1],ymm6[1],ymm8[3],ymm6[3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm9[2],ymm7[2],ymm9[3],ymm7[3],ymm9[6],ymm7[6],ymm9[7],ymm7[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,0],ymm10[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm15[2],mem[2],ymm15[3],mem[3],ymm15[6],mem[6],ymm15[7],mem[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm10[2],ymm7[2],ymm10[3],ymm7[3],ymm10[6],ymm7[6],ymm10[7],ymm7[7]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm13[1],mem[1],ymm13[3],mem[3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1],mem[1],ymm14[3],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm14[2],ymm12[2],ymm14[3],ymm12[3],ymm14[6],ymm12[6],ymm14[7],ymm12[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[2],mem[2],ymm15[3],mem[3],ymm15[6],mem[6],ymm15[7],mem[7]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,0],ymm10[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1],mem[1],ymm14[3],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[2],mem[2],ymm15[3],mem[3],ymm15[6],mem[6],ymm15[7],mem[7]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,0],ymm10[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1],mem[1],ymm14[3],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[2],mem[2],ymm15[3],mem[3],ymm15[6],mem[6],ymm15[7],mem[7]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,0],ymm10[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[1],mem[1],ymm14[3],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups (%rsp), %ymm15 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[2],mem[2],ymm15[3],mem[3],ymm15[6],mem[6],ymm15[7],mem[7]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,0],ymm10[4,5],ymm0[6,4]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[1],mem[1],ymm10[3],mem[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[2],mem[2],ymm14[3],mem[3],ymm14[6],mem[6],ymm14[7],mem[7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,0],ymm2[4,5],ymm0[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[2],mem[2],ymm15[3],mem[3],ymm15[6],mem[6],ymm15[7],mem[7]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1],xmm2[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[3,0],mem[3,0],ymm10[7,4],mem[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[2],mem[2],ymm10[3],mem[3],ymm10[6],mem[6],ymm10[7],mem[7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm14[3,0],mem[3,0],ymm14[7,4],mem[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm4[3,0],mem[3,0],ymm4[7,4],mem[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[3,0],mem[3,0],ymm10[7,4],mem[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm10[2],ymm1[2],ymm10[3],ymm1[3],ymm10[6],ymm1[6],ymm10[7],ymm1[7]
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm12[3,0],mem[3,0],ymm12[7,4],mem[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[6],ymm6[6],ymm4[7],ymm6[7]
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm5[3,0],mem[3,0],ymm5[7,4],mem[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm11[3,0],ymm13[3,0],ymm11[7,4],ymm13[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[2],mem[2],ymm2[3],mem[3],ymm2[6],mem[6],ymm2[7],mem[7]
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm5[2],ymm11[2],ymm5[3],ymm11[3],ymm5[6],ymm11[6],ymm5[7],ymm11[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,0],ymm9[3,0],ymm3[7,4],ymm9[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm6[2],ymm8[2],ymm6[3],ymm8[3],ymm6[6],ymm8[6],ymm6[7],ymm8[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm7[3,0],ymm10[3,0],ymm7[7,4],ymm10[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm8[2],ymm0[3],ymm8[3],ymm0[6],ymm8[6],ymm0[7],ymm8[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm7[3,0],ymm9[3,0],ymm7[7,4],ymm9[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm13[2],ymm0[3],ymm13[3],ymm0[6],ymm13[6],ymm0[7],ymm13[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm12[3,0],ymm14[3,0],ymm12[7,4],ymm14[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm1[3,0],ymm15[3,0],ymm1[7,4],ymm15[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm10[2,0],ymm0[2,3],ymm10[6,4],ymm0[6,7]
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm14[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm14[2,0],ymm0[2,3],ymm14[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm0[3,0],mem[3,0],ymm0[7,4],mem[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps $51, (%rsp), %ymm1, %ymm7 # 32-byte Folded Reload
+; AVX-NEXT: # ymm7 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm7[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vunpckhps (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX-NEXT: # ymm1 = ymm1[3,0],mem[3,0],ymm1[7,4],mem[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm1[2],mem[2],ymm1[3],mem[3],ymm1[6],mem[6],ymm1[7],mem[7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX-NEXT: # ymm2 = ymm2[3,0],mem[3,0],ymm2[7,4],mem[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, 192(%rsi)
@@ -11661,14 +11593,14 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, (%rax)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm5, 224(%rax)
-; AVX-NEXT: vmovaps %ymm14, 192(%rax)
-; AVX-NEXT: vmovaps %ymm10, 160(%rax)
-; AVX-NEXT: vmovaps %ymm4, 128(%rax)
+; AVX-NEXT: vmovaps %ymm7, 224(%rax)
+; AVX-NEXT: vmovaps %ymm15, 192(%rax)
+; AVX-NEXT: vmovaps %ymm6, 160(%rax)
+; AVX-NEXT: vmovaps %ymm5, 128(%rax)
; AVX-NEXT: vmovaps %ymm3, 96(%rax)
-; AVX-NEXT: vmovaps %ymm12, 64(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm1, 32(%rax)
+; AVX-NEXT: vmovaps %ymm1, 64(%rax)
+; AVX-NEXT: vmovaps %ymm4, 32(%rax)
; AVX-NEXT: vmovaps %ymm0, (%rax)
; AVX-NEXT: addq $3720, %rsp # imm = 0xE88
; AVX-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index c511c131193f7f..9d53325ed7c56f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -779,8 +779,8 @@ define void @store_i32_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[4],ymm8[4],ymm7[5],ymm8[5]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX-NEXT: vbroadcastss 16(%r9), %ymm4
@@ -1515,8 +1515,8 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm5[0],ymm13[0],ymm5[1],ymm13[1],ymm5[4],ymm13[4],ymm5[5],ymm13[5]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm9[0],ymm7[0],ymm9[2],ymm7[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
; AVX-NEXT: vbroadcastss 48(%r9), %ymm1
@@ -1542,8 +1542,8 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps (%rdx), %ymm11
; AVX-NEXT: vmovaps (%rcx), %ymm12
; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm14[0,1],ymm10[2,3],ymm14[4,5,6,7]
; AVX-NEXT: vmovaps (%r8), %ymm14
; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -3268,8 +3268,8 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm12[0],ymm8[0],ymm12[1],ymm8[1],ymm12[4],ymm8[4],ymm12[5],ymm8[5]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5],ymm0[6,7]
; AVX-NEXT: vbroadcastss 16(%r9), %ymm1
@@ -3301,8 +3301,8 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps 32(%rcx), %ymm13
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm13[0],ymm8[0],ymm13[2],ymm8[2]
; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovaps 32(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -3339,8 +3339,8 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps 64(%rcx), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovaps 64(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -3374,8 +3374,8 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps 96(%rcx), %ymm2
; AVX-NEXT: vunpcklpd {{.*#+}} ymm15 = ymm2[0],ymm10[0],ymm2[2],ymm10[2]
; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm15[2,3],ymm1[4,5,6,7]
; AVX-NEXT: vmovaps 96(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6613,8 +6613,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm15[0],ymm4[0],ymm15[2],ymm4[2]
; AVX-NEXT: vmovups %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
; AVX-NEXT: vbroadcastss 16(%r9), %ymm1
@@ -6647,8 +6647,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps 32(%rcx), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovaps 32(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6687,8 +6687,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps 64(%rcx), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovaps 64(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6727,8 +6727,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps 96(%rcx), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovaps 96(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6766,8 +6766,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps 128(%rcx), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovaps 128(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6804,8 +6804,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps 160(%rcx), %ymm8
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm8[0],ymm1[0],ymm8[2],ymm1[2]
; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovaps 160(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6842,8 +6842,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovaps 192(%r8), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6880,8 +6880,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm1[0],ymm5[0],ymm1[2],ymm5[2]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[0,1,2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5,6,7]
; AVX-NEXT: vmovaps 224(%r8), %ymm10
; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
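The recurring change in the stride-6 hunks above shows the fold in action: the 256-bit `vshufps ymmN = ymmN[0,1,2,0,4,5,6,4]` sinks below the `vextractf128` and shrinks to the 128-bit `vshufps xmmN = xmmN[0,1,2,0]`, so the in-lane shuffle now runs on the already-extracted subvector. A minimal hypothetical reduction of that shape is sketched below — the function name and exact IR are illustrative, not taken from the test files, and whether a given input reaches this exact PSHUFD/VPERMILPS-plus-extract DAG depends on how shuffle combining lowers it:

; A per-lane <8 x float> shuffle (the VPERMILPS form, immediate 0,1,2,0 in
; each 128-bit lane) whose only use is an extract of the upper half. With
; the fold, the shuffle can be performed at xmm width after the extract
; rather than at ymm width before it.
define <4 x float> @extract_hi_of_permilps(<8 x float> %v) {
  %s = shufflevector <8 x float> %v, <8 x float> poison,
                     <8 x i32> <i32 0, i32 1, i32 2, i32 0, i32 4, i32 5, i32 6, i32 4>
  %hi = shufflevector <8 x float> %s, <8 x float> poison,
                      <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  ret <4 x float> %hi
}

The remaining churn in these hunks (register renumbering and reordered stores/reloads) appears to be a knock-on effect of the narrower shuffle changing the schedule rather than a separate codegen change.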
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
index bd6c527e06b147..8d74c175f4bd2b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
@@ -982,117 +982,130 @@ define void @store_i32_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
;
; AVX-LABEL: store_i32_stride8_vf8:
; AVX: # %bb.0:
+; AVX-NEXT: subq $120, %rsp
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX-NEXT: vmovaps (%rdi), %ymm3
-; AVX-NEXT: vmovaps (%rsi), %ymm5
-; AVX-NEXT: vmovaps (%rdx), %ymm4
-; AVX-NEXT: vmovaps (%rcx), %ymm6
-; AVX-NEXT: vmovaps (%r8), %ymm7
-; AVX-NEXT: vmovaps (%r9), %ymm8
-; AVX-NEXT: vmovaps (%r10), %ymm9
-; AVX-NEXT: vmovaps (%rax), %ymm10
-; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[4],ymm10[4],ymm9[5],ymm10[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm8[1,0],ymm7[1,0],ymm8[5,4],ymm7[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[4],ymm6[4],ymm4[5],ymm6[5]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[1,0],ymm3[1,0],ymm5[5,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[6],ymm10[6],ymm9[7],ymm10[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm8[3,0],ymm7[3,0],ymm8[7,4],ymm7[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[2,3],ymm2[6,4],ymm1[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[6],ymm6[6],ymm4[7],ymm6[7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm5[3,0],ymm3[3,0],ymm5[7,4],ymm3[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm11[0,1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm10[1],ymm9[1],ymm10[3],ymm9[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm11 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm11[0,1],ymm2[2,0],ymm11[4,5],ymm2[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm11 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7]
-; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm12 = ymm6[1],ymm4[1],ymm6[3],ymm4[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm12, %xmm12
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm12[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm2[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[4],ymm8[4],ymm7[5],ymm8[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,1],ymm9[2,0],ymm7[4,5],ymm9[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[2],ymm4[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm7[4,5,6,7]
+; AVX-NEXT: vmovaps (%r8), %ymm9
; AVX-NEXT: vmovaps (%rcx), %xmm4
-; AVX-NEXT: vmovaps (%rdx), %xmm5
-; AVX-NEXT: vunpcklps {{.*#+}} xmm9 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX-NEXT: vmovaps (%rsi), %xmm10
-; AVX-NEXT: vmovaps (%rdi), %xmm11
+; AVX-NEXT: vmovaps (%rdx), %xmm3
+; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rsi), %xmm11
+; AVX-NEXT: vmovaps (%rdi), %xmm12
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm12[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm11[1],xmm0[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
; AVX-NEXT: vmovaps (%rax), %xmm6
-; AVX-NEXT: vmovaps (%r10), %xmm7
-; AVX-NEXT: vunpcklps {{.*#+}} xmm12 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm0, %ymm8
-; AVX-NEXT: vmovaps (%r9), %xmm13
-; AVX-NEXT: vmovaps (%r8), %xmm14
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm14[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0],xmm13[1],xmm15[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0,1,2,3,4,5],ymm8[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm11[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0],xmm10[1],xmm15[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0,1],xmm9[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm15 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; AVX-NEXT: vmovaps (%r10), %xmm5
+; AVX-NEXT: vunpcklps {{.*#+}} xmm13 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm1
+; AVX-NEXT: vmovaps (%r9), %xmm14
+; AVX-NEXT: vmovaps (%r8), %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm14[1],xmm2[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm15[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm15 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm15 = xmm0[1],xmm15[1]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm9 = xmm10[0],xmm9[0]
-; AVX-NEXT: vunpcklps {{.*#+}} xmm10 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm0[1]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps (%r9), %ymm6
+; AVX-NEXT: vmovaps (%r10), %ymm5
+; AVX-NEXT: vmovaps (%rax), %ymm4
+; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[4],ymm4[4],ymm5[5],ymm4[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm6[1,0],ymm9[1,0],ymm6[5,4],ymm9[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
+; AVX-NEXT: vmovaps (%rdx), %ymm3
+; AVX-NEXT: vmovaps (%rcx), %ymm2
+; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm7
+; AVX-NEXT: vmovaps (%rdi), %ymm1
+; AVX-NEXT: vmovaps (%rsi), %ymm0
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[1,0],ymm1[1,0],ymm0[5,4],ymm1[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[6],ymm4[6],ymm5[7],ymm4[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm6[3,0],ymm9[3,0],ymm6[7,4],ymm9[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[3,0],ymm1[3,0],ymm0[7,4],ymm1[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[2,0],xmm8[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} xmm7 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX-NEXT: # xmm7 = xmm7[0],mem[0]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm13[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm12[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm11
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm6[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm7[0,1,2],xmm4[3]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm8 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[6],ymm6[6],ymm9[7],ymm6[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,0],ymm10[4,5],ymm8[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vunpckhps {{.*#+}} ymm11 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
+; AVX-NEXT: vextractf128 $1, %ymm11, %xmm11
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[4],ymm6[4],ymm9[5],ymm6[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
+; AVX-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX-NEXT: # xmm1 = mem[2,2,2,2]
+; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX-NEXT: # xmm1 = mem[0,1,2],xmm1[3]
+; AVX-NEXT: vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX-NEXT: # xmm1 = mem[0,1],xmm1[2,3]
+; AVX-NEXT: vpermilps $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; AVX-NEXT: # xmm2 = mem[2,2,2,2]
+; AVX-NEXT: vblendps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX-NEXT: # xmm2 = mem[0,1,2],xmm2[3]
+; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 16-byte Folded Reload
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm0, 64(%rax)
-; AVX-NEXT: vmovaps %ymm9, (%rax)
-; AVX-NEXT: vmovaps %ymm1, 96(%rax)
-; AVX-NEXT: vmovaps %ymm8, 32(%rax)
-; AVX-NEXT: vmovaps %ymm3, 128(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 192(%rax)
+; AVX-NEXT: vmovaps %ymm1, 64(%rax)
+; AVX-NEXT: vmovaps %ymm0, 128(%rax)
+; AVX-NEXT: vmovaps %ymm8, 192(%rax)
+; AVX-NEXT: vmovaps %ymm7, (%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 224(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 160(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 96(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 32(%rax)
+; AVX-NEXT: addq $120, %rsp
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
@@ -2041,231 +2054,221 @@ define void @store_i32_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: subq $264, %rsp # imm = 0x108
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX-NEXT: vmovaps (%rdi), %ymm7
-; AVX-NEXT: vmovaps 32(%rdi), %ymm0
-; AVX-NEXT: vmovaps (%rsi), %ymm8
-; AVX-NEXT: vmovaps 32(%rsi), %ymm1
-; AVX-NEXT: vmovaps (%rdx), %ymm9
-; AVX-NEXT: vmovaps 32(%rdx), %ymm3
-; AVX-NEXT: vmovaps 32(%rcx), %ymm4
-; AVX-NEXT: vmovaps 32(%r8), %ymm5
-; AVX-NEXT: vmovaps 32(%r9), %ymm6
-; AVX-NEXT: vmovaps 32(%r10), %ymm11
-; AVX-NEXT: vmovaps 32(%rax), %ymm12
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm12[1],ymm11[1],ymm12[3],ymm11[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[6],ymm6[6],ymm5[7],ymm6[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,1],ymm2[2,0],ymm10[4,5],ymm2[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm13 = ymm4[1],ymm3[1],ymm4[3],ymm3[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm13[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3],ymm2[4,5,6,7]
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[6],ymm12[6],ymm11[7],ymm12[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm6[3,0],ymm5[3,0],ymm6[7,4],ymm5[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm10[2,0],ymm2[2,3],ymm10[6,4],ymm2[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm13[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3],ymm2[4,5,6,7]
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1],ymm2[2,0],ymm10[4,5],ymm2[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm4[0],ymm3[0],ymm4[2],ymm3[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm2[0,1],xmm13[2,3]
-; AVX-NEXT: vmovaps (%rcx), %ymm2
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0,1,2,3],ymm10[4,5,6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps (%r8), %ymm10
-; AVX-NEXT: vunpcklps {{.*#+}} ymm13 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[4],ymm12[4],ymm11[5],ymm12[5]
-; AVX-NEXT: vmovaps (%r9), %ymm11
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[1,0],ymm5[1,0],ymm6[5,4],ymm5[5,4]
-; AVX-NEXT: vmovaps (%r10), %ymm12
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[2,0],ymm13[2,3],ymm5[6,4],ymm13[6,7]
-; AVX-NEXT: vmovaps (%rax), %ymm5
-; AVX-NEXT: vunpcklps {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm5[1],ymm12[1],ymm5[3],ymm12[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[6],ymm11[6],ymm10[7],ymm11[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[6],ymm8[6],ymm7[7],ymm8[7]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm2[1],ymm9[1],ymm2[3],ymm9[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm12[2],ymm5[2],ymm12[3],ymm5[3],ymm12[6],ymm5[6],ymm12[7],ymm5[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,0],ymm10[3,0],ymm11[7,4],ymm10[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm9[2],ymm2[2],ymm9[3],ymm2[3],ymm9[6],ymm2[6],ymm9[7],ymm2[7]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm8[3,0],ymm7[3,0],ymm8[7,4],ymm7[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm12[0],ymm5[2],ymm12[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[4],ymm8[4],ymm7[5],ymm8[5]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm2[0],ymm9[0],ymm2[2],ymm9[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm0 = ymm12[0],ymm5[0],ymm12[1],ymm5[1],ymm12[4],ymm5[4],ymm12[5],ymm5[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[1,0],ymm10[1,0],ymm11[5,4],ymm10[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm0[2,3],ymm1[6,4],ymm0[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm9[0],ymm2[0],ymm9[1],ymm2[1],ymm9[4],ymm2[4],ymm9[5],ymm2[5]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm8[1,0],ymm7[1,0],ymm8[5,4],ymm7[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovaps (%rsi), %xmm4
-; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps (%rax), %xmm1
+; AVX-NEXT: vmovaps 32(%r8), %ymm0
+; AVX-NEXT: vmovaps 32(%r9), %ymm5
+; AVX-NEXT: vmovaps 32(%r10), %ymm13
+; AVX-NEXT: vmovaps 32(%rax), %ymm14
+; AVX-NEXT: vmovaps (%rcx), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rdx), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps (%r10), %xmm0
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX-NEXT: vmovaps (%rsi), %xmm6
+; AVX-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rdi), %xmm7
+; AVX-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rax), %xmm3
+; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%r10), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm3
; AVX-NEXT: vmovaps (%r9), %xmm15
-; AVX-NEXT: vmovaps (%r8), %xmm13
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm13[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm15[1],xmm2[2,3]
+; AVX-NEXT: vmovaps (%r8), %xmm11
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm11[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0],xmm15[1],xmm4[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm7[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm11[0],xmm15[0],xmm11[1],xmm15[1]
+; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
-; AVX-NEXT: vmovaps (%rdi), %xmm11
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm11[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3]
-; AVX-NEXT: vmovaps (%rcx), %xmm3
-; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vmovaps (%rdx), %xmm14
-; AVX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm14[1],ymm13[1],ymm14[3],ymm13[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm0[2],ymm5[2],ymm0[3],ymm5[3],ymm0[6],ymm5[6],ymm0[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
+; AVX-NEXT: vmovaps 32(%rdx), %ymm1
+; AVX-NEXT: vmovaps 32(%rcx), %ymm2
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vmovaps 32(%rdi), %ymm6
+; AVX-NEXT: vmovaps 32(%rsi), %ymm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm8[0,1],xmm4[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm3 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[3,0],ymm0[3,0],ymm5[7,4],ymm0[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[2,0],ymm3[2,3],ymm4[6,4],ymm3[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[6],ymm2[6],ymm1[7],ymm2[7]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm7[3,0],ymm6[3,0],ymm7[7,4],ymm6[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm8[2,0],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm14[0],ymm13[0],ymm14[2],ymm13[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm5[0],ymm0[1],ymm5[1],ymm0[4],ymm5[4],ymm0[5],ymm5[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm8[0,1],xmm4[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm3 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm5[1,0],ymm0[1,0],ymm5[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,3],ymm0[6,4],ymm3[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[4],ymm2[4],ymm1[5],ymm2[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm7[1,0],ymm6[1,0],ymm7[5,4],ymm6[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
+; AVX-NEXT: vmovaps 32(%rax), %xmm13
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%rax), %xmm9
-; AVX-NEXT: vmovaps 32(%r10), %xmm7
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm9[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm7[0,1,2],xmm0[3]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm6
-; AVX-NEXT: vmovaps 32(%r9), %xmm8
-; AVX-NEXT: vmovaps 32(%r8), %xmm5
-; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm8[2],xmm5[3],xmm8[3]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm10
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm6[6,7]
+; AVX-NEXT: vmovaps 32(%r10), %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm13[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm3
+; AVX-NEXT: vmovaps 32(%r9), %xmm2
+; AVX-NEXT: vmovaps 32(%r8), %xmm0
+; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm6
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7]
; AVX-NEXT: vmovaps 32(%rcx), %xmm6
-; AVX-NEXT: vmovaps 32(%rdx), %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm6[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm1[0,1,2],xmm12[3]
-; AVX-NEXT: vmovaps 32(%rsi), %xmm3
-; AVX-NEXT: vmovaps 32(%rdi), %xmm2
-; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm12 = xmm0[0,1],xmm12[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm10[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm10 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm10 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
-; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX-NEXT: vmovaps 32(%rdx), %xmm7
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm6[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm7[0,1,2],xmm8[3]
+; AVX-NEXT: vmovaps 32(%rsi), %xmm12
+; AVX-NEXT: vmovaps 32(%rdi), %xmm5
+; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm5[2],xmm12[2],xmm5[3],xmm12[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm14[0,1],xmm8[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm14[1],xmm3[1]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm8 = xmm1[2],xmm13[2],xmm1[3],xmm13[3]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,3,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm10[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
-; AVX-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm4[0,1,0,1]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm8[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, (%rsp) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm4 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm1[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm1[0]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm5[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm3
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm2 # 16-byte Folded Reload
-; AVX-NEXT: # xmm2 = xmm11[2],mem[2],xmm11[3],mem[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm13[2],xmm15[2],xmm13[3],xmm15[3]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm7[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm3[0]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm5[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0],xmm12[1],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovaps (%r8), %ymm8
+; AVX-NEXT: vmovaps (%r9), %ymm6
+; AVX-NEXT: vmovaps (%r10), %ymm5
+; AVX-NEXT: vmovaps (%rax), %ymm4
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[6],ymm6[6],ymm8[7],ymm6[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX-NEXT: vmovaps (%rdx), %ymm3
+; AVX-NEXT: vmovaps (%rcx), %ymm2
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps (%rdi), %ymm1
+; AVX-NEXT: vmovaps (%rsi), %ymm0
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm7[0,1,2,3],ymm9[4,5,6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[6],ymm4[6],ymm5[7],ymm4[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm6[3,0],ymm8[3,0],ymm6[7,4],ymm8[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm9[2,0],ymm7[2,3],ymm9[6,4],ymm7[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm9 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[3,0],ymm1[3,0],ymm0[7,4],ymm1[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm14[2,0],xmm9[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm14 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[4],ymm6[4],ymm8[5],ymm6[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm14[0,1],ymm7[2,0],ymm14[4,5],ymm7[6,4]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm14 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vunpcklps {{.*#+}} ymm13 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[0,1],xmm14[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1,2,3],ymm7[4,5,6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[4],ymm4[4],ymm5[5],ymm4[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[1,0],ymm8[1,0],ymm6[5,4],ymm8[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[2,0],ymm4[2,3],ymm5[6,4],ymm4[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[1,0],ymm0[5,4],ymm1[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX-NEXT: vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX-NEXT: # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm11[2],xmm15[2],xmm11[3],xmm15[3]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm11[2,2,2,2]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm8[0,1,2],xmm3[3]
+; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm5[2,2,2,2]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm1[0,1],xmm4[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm8[2],xmm11[2],xmm8[3],xmm11[3]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm5
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm8[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm14[0,1,2],xmm5[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm5 = xmm2[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm5 = xmm14[2],xmm8[2],xmm14[3],xmm8[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm5[1]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm5 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm2, 96(%rax)
-; AVX-NEXT: vmovaps %ymm4, 64(%rax)
-; AVX-NEXT: vmovaps %ymm1, 288(%rax)
-; AVX-NEXT: vmovaps %ymm0, 256(%rax)
-; AVX-NEXT: vmovaps %ymm10, 352(%rax)
-; AVX-NEXT: vmovaps %ymm12, 320(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, (%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 32(%rax)
-; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, 96(%rax)
+; AVX-NEXT: vmovaps %ymm3, 64(%rax)
; AVX-NEXT: vmovaps %ymm0, 160(%rax)
+; AVX-NEXT: vmovaps %ymm7, 128(%rax)
+; AVX-NEXT: vmovaps %ymm9, 224(%rax)
+; AVX-NEXT: vmovaps %ymm10, 192(%rax)
+; AVX-NEXT: vmovaps %ymm12, 288(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 128(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 224(%rax)
+; AVX-NEXT: vmovaps %ymm0, 256(%rax)
+; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 352(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 192(%rax)
+; AVX-NEXT: vmovaps %ymm0, 320(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 416(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -2274,6 +2277,10 @@ define void @store_i32_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps %ymm0, 480(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 448(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, (%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 32(%rax)
; AVX-NEXT: addq $264, %rsp # imm = 0x108
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -4493,282 +4500,220 @@ define void @store_i32_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
;
; AVX-LABEL: store_i32_stride8_vf32:
; AVX: # %bb.0:
-; AVX-NEXT: subq $648, %rsp # imm = 0x288
+; AVX-NEXT: subq $680, %rsp # imm = 0x2A8
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX-NEXT: vmovaps (%rdi), %ymm3
-; AVX-NEXT: vmovaps 32(%rdi), %ymm0
-; AVX-NEXT: vmovaps (%rsi), %ymm4
-; AVX-NEXT: vmovaps 32(%rsi), %ymm1
-; AVX-NEXT: vmovaps 32(%rdx), %ymm2
-; AVX-NEXT: vmovaps (%rdx), %ymm5
-; AVX-NEXT: vmovaps (%rcx), %ymm7
-; AVX-NEXT: vmovaps (%r8), %ymm8
-; AVX-NEXT: vmovaps (%r9), %ymm11
-; AVX-NEXT: vmovaps (%r10), %ymm10
-; AVX-NEXT: vmovaps (%rax), %ymm12
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm12[0],ymm10[0],ymm12[2],ymm10[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm9 = ymm8[0],ymm11[0],ymm8[1],ymm11[1],ymm8[4],ymm11[4],ymm8[5],ymm11[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,0],ymm9[4,5],ymm6[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm9 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm7[0],ymm5[0],ymm7[2],ymm5[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm9[0,1],xmm13[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovaps (%rsi), %ymm1
+; AVX-NEXT: vmovaps (%rdx), %ymm2
+; AVX-NEXT: vmovaps (%rcx), %ymm3
+; AVX-NEXT: vmovaps (%rcx), %xmm5
+; AVX-NEXT: vmovaps (%rdx), %xmm7
+; AVX-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
+; AVX-NEXT: vmovaps (%rsi), %xmm9
+; AVX-NEXT: vmovaps (%rdi), %xmm12
+; AVX-NEXT: vmovaps (%r9), %xmm11
+; AVX-NEXT: vmovaps (%r8), %xmm13
+; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
+; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX-NEXT: vmovaps (%rax), %xmm8
+; AVX-NEXT: vmovaps (%r10), %xmm10
+; AVX-NEXT: vunpcklps {{.*#+}} xmm14 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
+; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm14[0,1,0,1]
+; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm15[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm15 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm15 = xmm15[0],xmm4[0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[4],ymm12[4],ymm10[5],ymm12[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm11[1,0],ymm8[1,0],ymm11[5,4],ymm8[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm9[2,0],ymm6[2,3],ymm9[6,4],ymm6[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm9 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
-; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm4[1,0],ymm3[1,0],ymm4[5,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm13[0,1],xmm9[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm12[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm9[1],xmm6[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm4
+; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm13[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm14[0],xmm11[1],xmm14[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm4[6,7]
+; AVX-NEXT: vmovaps (%r8), %ymm4
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm14[4,5,6,7]
; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm12[1],ymm10[1],ymm12[3],ymm10[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm9 = ymm8[2],ymm11[2],ymm8[3],ymm11[3],ymm8[6],ymm11[6],ymm8[7],ymm11[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,1],ymm6[2,0],ymm9[4,5],ymm6[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm6 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm6
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm13 = ymm7[1],ymm5[1],ymm7[3],ymm5[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm6[0,1],xmm13[2,3]
-; AVX-NEXT: vmovaps 32(%rcx), %ymm6
+; AVX-NEXT: vmovaps (%r9), %ymm6
+; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm12[2],xmm9[2],xmm12[3],xmm9[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm11 = xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm9
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm8[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm10[0,1,2],xmm13[3]
+; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
+; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm13[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm5[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm7[0,1,2],xmm13[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm12[0,1],xmm13[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm13[0,1,2,3],ymm9[4,5,6,7]
; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%r8), %ymm9
-; AVX-NEXT: vunpckhps {{.*#+}} ymm12 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7]
-; AVX-NEXT: vmovaps 32(%r9), %ymm10
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[3,0],ymm8[3,0],ymm11[7,4],ymm8[7,4]
-; AVX-NEXT: vmovaps 32(%r10), %ymm8
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm11[2,0],ymm12[2,3],ymm11[6,4],ymm12[6,7]
-; AVX-NEXT: vmovaps 32(%rax), %ymm11
-; AVX-NEXT: vunpckhps {{.*#+}} ymm5 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,0],ymm3[3,0],ymm4[7,4],ymm3[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm12[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm11[0],ymm8[0],ymm11[2],ymm8[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[4],ymm10[4],ymm9[5],ymm10[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm6[0],ymm2[0],ymm6[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm3 = ymm8[0],ymm11[0],ymm8[1],ymm11[1],ymm8[4],ymm11[4],ymm8[5],ymm11[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm10[1,0],ymm9[1,0],ymm10[5,4],ymm9[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[2,0],ymm3[2,3],ymm4[6,4],ymm3[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[4],ymm6[4],ymm2[5],ymm6[5]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm11[1],ymm8[1],ymm11[3],ymm8[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[6],ymm10[6],ymm9[7],ymm10[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm6[1],ymm2[1],ymm6[3],ymm2[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 64(%rdi), %ymm3
-; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm8[2],ymm11[2],ymm8[3],ymm11[3],ymm8[6],ymm11[6],ymm8[7],ymm11[7]
-; AVX-NEXT: vmovaps 64(%rsi), %ymm4
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm10[3,0],ymm9[3,0],ymm10[7,4],ymm9[7,4]
-; AVX-NEXT: vmovaps 64(%r8), %ymm5
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
-; AVX-NEXT: vmovaps 64(%r9), %ymm7
-; AVX-NEXT: vunpckhps {{.*#+}} ymm6 = ymm2[2],ymm6[2],ymm2[3],ymm6[3],ymm2[6],ymm6[6],ymm2[7],ymm6[7]
-; AVX-NEXT: vmovaps 64(%r10), %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vmovaps 64(%rax), %ymm0
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm6
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm6[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,1],ymm1[2,0],ymm6[4,5],ymm1[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm8
-; AVX-NEXT: vmovaps 64(%rdx), %ymm1
-; AVX-NEXT: vmovaps 64(%rcx), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm1[0],ymm9[2],ymm1[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7]
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm7[1,0],ymm5[1,0],ymm7[5,4],ymm5[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm8[2,0],ymm6[2,3],ymm8[6,4],ymm6[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm1[0],ymm9[0],ymm1[1],ymm9[1],ymm1[4],ymm9[4],ymm1[5],ymm9[5]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm4[1,0],ymm3[1,0],ymm4[5,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7]
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,0],ymm8[4,5],ymm6[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
+; AVX-NEXT: vmovaps (%r10), %ymm9
+; AVX-NEXT: vunpckhps {{.*#+}} xmm7 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; AVX-NEXT: vmovaps (%rax), %ymm5
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm12[1],xmm7[1]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm11[2,3,2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3,4,5],ymm8[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm9[0],ymm5[2],ymm9[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[4],ymm6[4],ymm4[5],ymm6[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,0],ymm8[4,5],ymm7[6,4]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm9[1],ymm1[1],ymm9[3],ymm1[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7]
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm7[3,0],ymm5[3,0],ymm7[7,4],ymm5[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm1[2],ymm9[2],ymm1[3],ymm9[3],ymm1[6],ymm9[6],ymm1[7],ymm9[7]
-; AVX-NEXT: vmovaps 96(%r8), %ymm0
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,0],ymm3[3,0],ymm4[7,4],ymm3[7,4]
-; AVX-NEXT: vmovaps 96(%r9), %ymm1
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3]
-; AVX-NEXT: vmovaps 96(%r10), %ymm2
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%rax), %ymm5
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm5[0],ymm2[0],ymm5[2],ymm2[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
-; AVX-NEXT: vmovaps 96(%rdi), %ymm3
-; AVX-NEXT: vmovaps 96(%rsi), %ymm4
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm8
-; AVX-NEXT: vmovaps 96(%rdx), %ymm6
-; AVX-NEXT: vmovaps 96(%rcx), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm6[0],ymm9[2],ymm6[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[4],ymm5[4],ymm2[5],ymm5[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm9[0],ymm5[0],ymm9[1],ymm5[1],ymm9[4],ymm5[4],ymm9[5],ymm5[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm6[1,0],ymm4[1,0],ymm6[5,4],ymm4[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm4[1,0],ymm3[1,0],ymm4[5,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[2,0],xmm8[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm2[1],ymm5[3],ymm2[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm9[1],ymm5[3],ymm9[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[6],ymm6[6],ymm4[7],ymm6[7]
; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,0],ymm8[4,5],ymm7[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm8 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm9[1],ymm6[1],ymm9[3],ymm6[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[6],ymm5[6],ymm2[7],ymm5[7]
+; AVX-NEXT: vmovaps 32(%rcx), %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm9[2],ymm5[2],ymm9[3],ymm5[3],ymm9[6],ymm5[6],ymm9[7],ymm5[7]
+; AVX-NEXT: vmovaps 32(%rax), %xmm5
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm4[3,0],ymm6[7,4],ymm4[7,4]
+; AVX-NEXT: vmovaps 32(%r10), %xmm4
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0],ymm8[2,3],ymm6[6,4],ymm8[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 32(%r9), %xmm1
+; AVX-NEXT: vmovaps 32(%r8), %xmm2
+; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm3[0,1,0,1]
+; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5],ymm6[6,7]
+; AVX-NEXT: vmovaps 32(%rdx), %xmm0
+; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
+; AVX-NEXT: vmovaps 32(%rsi), %xmm9
+; AVX-NEXT: vmovaps 32(%rdi), %xmm10
+; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm11[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm10[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm9[1],xmm6[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm8[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm2[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],xmm1[1],xmm8[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm3[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm5[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm6
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm1[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm7[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm3[0,1],xmm6[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
+; AVX-NEXT: vmovaps 32(%r8), %ymm0
+; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX-NEXT: vmovaps 32(%r9), %ymm1
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX-NEXT: vmovaps 32(%r10), %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 32(%rax), %ymm3
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 32(%rdx), %ymm4
+; AVX-NEXT: vmovaps 32(%rcx), %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps 32(%rdi), %ymm8
+; AVX-NEXT: vmovaps 32(%rsi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm3[3,0],ymm4[7,4],ymm3[7,4]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps (%r9), %xmm2
-; AVX-NEXT: vmovaps (%r8), %xmm3
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm4
-; AVX-NEXT: vmovaps (%rax), %xmm0
-; AVX-NEXT: vmovaps (%r10), %xmm1
+; AVX-NEXT: vmovaps 64(%r9), %xmm4
+; AVX-NEXT: vmovaps 64(%r8), %xmm5
+; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
+; AVX-NEXT: vmovaps 64(%rax), %xmm0
+; AVX-NEXT: vmovaps 64(%r10), %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm6[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm4[0,1,2,3,4,5],ymm5[6,7]
-; AVX-NEXT: vmovaps (%rcx), %xmm4
-; AVX-NEXT: vmovaps (%rdx), %xmm5
-; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX-NEXT: vmovaps (%rsi), %xmm9
-; AVX-NEXT: vmovaps (%rdi), %xmm10
-; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0],xmm9[1],xmm7[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm3[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],xmm2[1],xmm8[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3,4,5],ymm6[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} xmm6 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm0[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm1[0,1,2],xmm3[3]
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm7
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm4[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm5[0,1,2],xmm7[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm6[0,1],xmm7[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm3[1]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%r9), %xmm4
-; AVX-NEXT: vmovaps 32(%r8), %xmm5
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT: vmovaps 32(%rax), %xmm0
-; AVX-NEXT: vmovaps 32(%r10), %xmm1
-; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovaps 32(%rcx), %xmm2
-; AVX-NEXT: vmovaps 32(%rdx), %xmm3
-; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vmovaps 32(%rsi), %xmm9
-; AVX-NEXT: vmovaps 32(%rdi), %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
+; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX-NEXT: vmovaps 64(%rcx), %xmm2
+; AVX-NEXT: vmovaps 64(%rdx), %xmm3
+; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX-NEXT: vmovaps 64(%rsi), %xmm9
+; AVX-NEXT: vmovaps 64(%rdi), %xmm10
; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
@@ -4804,32 +4749,81 @@ define void @store_i32_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 64(%r9), %xmm4
-; AVX-NEXT: vmovaps 64(%r8), %xmm5
+; AVX-NEXT: vmovaps 64(%r8), %ymm0
+; AVX-NEXT: vmovaps 64(%r9), %ymm1
+; AVX-NEXT: vmovaps 64(%r10), %ymm2
+; AVX-NEXT: vmovaps 64(%rax), %ymm3
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 64(%rdx), %ymm4
+; AVX-NEXT: vmovaps 64(%rcx), %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps 64(%rdi), %ymm8
+; AVX-NEXT: vmovaps 64(%rsi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, (%rsp) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 96(%r9), %xmm4
+; AVX-NEXT: vmovaps 96(%r8), %xmm5
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT: vmovaps 64(%rax), %xmm9
-; AVX-NEXT: vmovaps 64(%r10), %xmm1
+; AVX-NEXT: vmovaps 96(%rax), %xmm9
+; AVX-NEXT: vmovaps 96(%r10), %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovaps 64(%rcx), %xmm2
-; AVX-NEXT: vmovaps 64(%rdx), %xmm3
+; AVX-NEXT: vmovaps 96(%rcx), %xmm2
+; AVX-NEXT: vmovaps 96(%rdx), %xmm3
; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vmovaps 64(%rsi), %xmm0
-; AVX-NEXT: vmovaps 64(%rdi), %xmm7
-; AVX-NEXT: vunpcklps {{.*#+}} xmm15 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm15 = xmm15[0],xmm8[0]
-; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm10[4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm7[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm15[0,1],xmm8[2,3]
+; AVX-NEXT: vmovaps 96(%rsi), %xmm0
+; AVX-NEXT: vmovaps 96(%rdi), %xmm7
+; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
+; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm7[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm0[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm5[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0],xmm4[1],xmm15[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm15[0,1,2,3,4,5],ymm6[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm5[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0,1,2,3,4,5],ymm6[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm8[0,1,2,3],ymm6[4,5,6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm7[2],xmm0[2],xmm7[3],xmm0[3]
; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
@@ -4850,94 +4844,77 @@ define void @store_i32_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vmovaps 96(%r9), %xmm3
-; AVX-NEXT: vmovaps 96(%r8), %xmm9
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm9[0],xmm3[0],xmm9[1],xmm3[1]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT: vmovaps 96(%rax), %xmm7
-; AVX-NEXT: vmovaps 96(%r10), %xmm6
-; AVX-NEXT: vunpcklps {{.*#+}} xmm15 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX-NEXT: vmovaps 96(%rcx), %xmm5
-; AVX-NEXT: vmovaps 96(%rdx), %xmm4
-; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX-NEXT: vmovaps 96(%rsi), %xmm1
-; AVX-NEXT: vmovaps 96(%rdi), %xmm0
-; AVX-NEXT: vunpcklps {{.*#+}} xmm14 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm14 = xmm14[0],xmm2[0]
+; AVX-NEXT: vmovaps 96(%r8), %ymm7
+; AVX-NEXT: vmovaps 96(%r9), %ymm6
+; AVX-NEXT: vmovaps 96(%r10), %ymm5
+; AVX-NEXT: vmovaps 96(%rax), %ymm4
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[4],ymm6[4],ymm7[5],ymm6[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX-NEXT: vmovaps 96(%rdx), %ymm3
+; AVX-NEXT: vmovaps 96(%rcx), %ymm2
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vmovaps 96(%rdi), %ymm1
+; AVX-NEXT: vmovaps 96(%rsi), %ymm0
+; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm15[0,1],xmm9[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[4],ymm4[4],ymm5[5],ymm4[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm6[1,0],ymm7[1,0],ymm6[5,4],ymm7[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm15[2,0],ymm8[2,3],ymm15[6,4],ymm8[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[1,0],ymm1[1,0],ymm0[5,4],ymm1[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[2,0],xmm15[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm0[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm14[0],xmm1[1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm14[0,1],xmm2[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm14
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm9[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0],xmm3[1],xmm15[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm9[2],xmm3[2],xmm9[3],xmm3[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm7[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3]
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm9
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm5[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm4[0,1,2],xmm9[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm0[0,1],xmm9[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm14 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[6],ymm6[6],ymm7[7],ymm6[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,0],ymm15[4,5],ymm14[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm15 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vunpckhps {{.*#+}} ymm13 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
+; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[0,1],xmm15[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[6],ymm4[6],ymm5[7],ymm4[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[3,0],ymm7[3,0],ymm6[7,4],ymm7[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[2,0],ymm4[2,3],ymm5[6,4],ymm4[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm1[3,0],ymm0[7,4],ymm1[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm0, 864(%rax)
-; AVX-NEXT: vmovaps %ymm3, 832(%rax)
-; AVX-NEXT: vmovaps %ymm2, 800(%rax)
-; AVX-NEXT: vmovaps %ymm8, 768(%rax)
-; AVX-NEXT: vmovaps %ymm10, 608(%rax)
-; AVX-NEXT: vmovaps %ymm11, 576(%rax)
-; AVX-NEXT: vmovaps %ymm12, 544(%rax)
-; AVX-NEXT: vmovaps %ymm13, 512(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 352(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 320(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 288(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 256(%rax)
-; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 96(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 64(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 32(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, (%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 992(%rax)
+; AVX-NEXT: vmovaps %ymm13, 960(%rax)
+; AVX-NEXT: vmovaps %ymm8, 928(%rax)
+; AVX-NEXT: vmovaps %ymm9, 896(%rax)
+; AVX-NEXT: vmovaps %ymm10, 864(%rax)
+; AVX-NEXT: vmovaps %ymm11, 832(%rax)
+; AVX-NEXT: vmovaps %ymm12, 800(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 960(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 928(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 896(%rax)
+; AVX-NEXT: vmovaps %ymm0, 768(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 736(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 704(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 672(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 640(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 608(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 576(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 544(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 512(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 480(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 448(%rax)
@@ -4946,6 +4923,14 @@ define void @store_i32_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 384(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 352(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 320(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 288(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 256(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 224(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 192(%rax)
@@ -4953,7 +4938,15 @@ define void @store_i32_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps %ymm0, 160(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 128(%rax)
-; AVX-NEXT: addq $648, %rsp # imm = 0x288
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 96(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 64(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 32(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, (%rax)
+; AVX-NEXT: addq $680, %rsp # imm = 0x2A8
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
@@ -9726,490 +9719,220 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
;
; AVX-LABEL: store_i32_stride8_vf64:
; AVX: # %bb.0:
-; AVX-NEXT: subq $1672, %rsp # imm = 0x688
+; AVX-NEXT: subq $1704, %rsp # imm = 0x6A8
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX-NEXT: vmovaps (%rdi), %ymm3
-; AVX-NEXT: vmovaps 32(%rdi), %ymm0
-; AVX-NEXT: vmovaps (%rsi), %ymm4
-; AVX-NEXT: vmovaps 32(%rsi), %ymm1
-; AVX-NEXT: vmovaps 32(%rdx), %ymm2
-; AVX-NEXT: vmovaps (%rdx), %ymm5
-; AVX-NEXT: vmovaps (%rcx), %ymm6
-; AVX-NEXT: vmovaps (%r8), %ymm7
-; AVX-NEXT: vmovaps (%r9), %ymm9
-; AVX-NEXT: vmovaps (%r10), %ymm11
-; AVX-NEXT: vmovaps (%rax), %ymm12
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm12[0],ymm11[0],ymm12[2],ymm11[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm7[0],ymm9[0],ymm7[1],ymm9[1],ymm7[4],ymm9[4],ymm7[5],ymm9[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,0],ymm10[4,5],ymm8[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm13 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm13[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
-; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[4],ymm12[4],ymm11[5],ymm12[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm7[1,0],ymm9[5,4],ymm7[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm10[2,0],ymm8[2,3],ymm10[6,4],ymm8[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm4[1,0],ymm3[1,0],ymm4[5,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm13[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
-; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm8 = ymm12[1],ymm11[1],ymm12[3],ymm11[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm7[2],ymm9[2],ymm7[3],ymm9[3],ymm7[6],ymm9[6],ymm7[7],ymm9[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1],ymm8[2,0],ymm10[4,5],ymm8[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm13 = ymm6[1],ymm5[1],ymm6[3],ymm5[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm8[0,1],xmm13[2,3]
-; AVX-NEXT: vmovaps 32(%rcx), %ymm8
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovaps (%rsi), %ymm1
+; AVX-NEXT: vmovaps (%rdx), %ymm2
+; AVX-NEXT: vmovaps (%rcx), %ymm3
+; AVX-NEXT: vmovaps (%rcx), %xmm5
+; AVX-NEXT: vmovaps (%rdx), %xmm7
+; AVX-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
+; AVX-NEXT: vmovaps (%rsi), %xmm10
+; AVX-NEXT: vmovaps (%rdi), %xmm12
+; AVX-NEXT: vmovaps (%r9), %xmm11
+; AVX-NEXT: vmovaps (%r8), %xmm13
+; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
+; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX-NEXT: vmovaps (%rax), %xmm8
+; AVX-NEXT: vmovaps (%r10), %xmm9
+; AVX-NEXT: vunpcklps {{.*#+}} xmm14 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm14[0,1,0,1]
+; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm15[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} xmm15 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm15 = xmm15[0],xmm4[0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm12[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm10[1],xmm6[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm4
+; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm13[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm14[0],xmm11[1],xmm14[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm4[6,7]
+; AVX-NEXT: vmovaps (%r8), %ymm4
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm14[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps (%r9), %ymm6
+; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm11 = xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm10
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm8[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm9[0,1,2],xmm13[3]
+; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm0, %ymm13
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm13[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm5[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm7[0,1,2],xmm13[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm13 = xmm12[0,1],xmm13[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0,1,2,3],ymm10[4,5,6,7]
; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%r8), %ymm10
-; AVX-NEXT: vunpckhps {{.*#+}} ymm12 = ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[6],ymm12[6],ymm11[7],ymm12[7]
-; AVX-NEXT: vmovaps 32(%r9), %ymm11
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[3,0],ymm7[3,0],ymm9[7,4],ymm7[7,4]
-; AVX-NEXT: vmovaps 32(%r10), %ymm7
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm9[2,0],ymm12[2,3],ymm9[6,4],ymm12[6,7]
-; AVX-NEXT: vmovaps 32(%rax), %ymm9
-; AVX-NEXT: vunpckhps {{.*#+}} ymm5 = ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[6],ymm6[6],ymm5[7],ymm6[7]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,0],ymm3[3,0],ymm4[7,4],ymm3[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm12[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm9[0],ymm7[0],ymm9[2],ymm7[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm8[0],ymm2[0],ymm8[2],ymm2[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm3 = ymm7[0],ymm9[0],ymm7[1],ymm9[1],ymm7[4],ymm9[4],ymm7[5],ymm9[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[1,0],ymm10[1,0],ymm11[5,4],ymm10[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[2,0],ymm3[2,3],ymm4[6,4],ymm3[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm2[0],ymm8[0],ymm2[1],ymm8[1],ymm2[4],ymm8[4],ymm2[5],ymm8[5]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm9[1],ymm7[1],ymm9[3],ymm7[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[6],ymm11[6],ymm10[7],ymm11[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm4
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm8[1],ymm2[1],ymm8[3],ymm2[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm5, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 64(%rdi), %ymm3
-; AVX-NEXT: vunpckhps {{.*#+}} ymm6 = ymm7[2],ymm9[2],ymm7[3],ymm9[3],ymm7[6],ymm9[6],ymm7[7],ymm9[7]
-; AVX-NEXT: vmovaps 64(%rsi), %ymm4
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm11[3,0],ymm10[3,0],ymm11[7,4],ymm10[7,4]
-; AVX-NEXT: vmovaps 64(%r8), %ymm5
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
-; AVX-NEXT: vmovaps 64(%r9), %ymm6
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm2[2],ymm8[2],ymm2[3],ymm8[3],ymm2[6],ymm8[6],ymm2[7],ymm8[7]
-; AVX-NEXT: vmovaps 64(%r10), %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vmovaps 64(%rax), %ymm0
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,1],ymm1[2,0],ymm7[4,5],ymm1[6,4]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm8
-; AVX-NEXT: vmovaps 64(%rdx), %ymm1
-; AVX-NEXT: vmovaps 64(%rcx), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm1[0],ymm9[2],ymm1[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm6[1,0],ymm5[1,0],ymm6[5,4],ymm5[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm1[0],ymm9[0],ymm1[1],ymm9[1],ymm1[4],ymm9[4],ymm1[5],ymm9[5]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm4[1,0],ymm3[1,0],ymm4[5,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX-NEXT: vmovaps (%r10), %ymm10
+; AVX-NEXT: vunpckhps {{.*#+}} xmm7 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; AVX-NEXT: vmovaps (%rax), %ymm5
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm12[1],xmm7[1]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm8 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm11[2,3,2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm9
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[6],ymm6[6],ymm5[7],ymm6[7]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm10[0],ymm5[2],ymm10[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[4],ymm6[4],ymm4[5],ymm6[5]
; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,0],ymm8[4,5],ymm7[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm9[1],ymm1[1],ymm9[3],ymm1[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm6[3,0],ymm5[3,0],ymm6[7,4],ymm5[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm1[2],ymm9[2],ymm1[3],ymm9[3],ymm1[6],ymm9[6],ymm1[7],ymm9[7]
-; AVX-NEXT: vmovaps 96(%r8), %ymm0
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,0],ymm3[3,0],ymm4[7,4],ymm3[7,4]
-; AVX-NEXT: vmovaps 96(%r9), %ymm1
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3]
-; AVX-NEXT: vmovaps 96(%r10), %ymm2
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%rax), %ymm5
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm5[0],ymm2[0],ymm5[2],ymm2[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm3[2,0],ymm4[4,5],ymm3[6,4]
-; AVX-NEXT: vmovaps 96(%rdi), %ymm3
-; AVX-NEXT: vmovaps 96(%rsi), %ymm4
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm8
-; AVX-NEXT: vmovaps 96(%rdx), %ymm6
-; AVX-NEXT: vmovaps 96(%rcx), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm6[0],ymm9[2],ymm6[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm9 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[4],ymm5[4],ymm2[5],ymm5[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm10[0],ymm5[0],ymm10[1],ymm5[1],ymm10[4],ymm5[4],ymm10[5],ymm5[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm6[1,0],ymm4[1,0],ymm6[5,4],ymm4[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm4[1,0],ymm3[1,0],ymm4[5,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm9[2,0],xmm8[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm2[1],ymm5[3],ymm2[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm10[1],ymm5[3],ymm10[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[6],ymm6[6],ymm4[7],ymm6[7]
; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,0],ymm8[4,5],ymm7[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm8 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm9[1],ymm6[1],ymm9[3],ymm6[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm9 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[6],ymm5[6],ymm2[7],ymm5[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm3[3,0],ymm4[7,4],ymm3[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vmovaps 32(%rcx), %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm10[2],ymm5[2],ymm10[3],ymm5[3],ymm10[6],ymm5[6],ymm10[7],ymm5[7]
+; AVX-NEXT: vmovaps 32(%rax), %xmm5
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm4[3,0],ymm6[7,4],ymm4[7,4]
+; AVX-NEXT: vmovaps 32(%r10), %xmm4
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[2,0],ymm8[2,3],ymm6[6,4],ymm8[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 128(%r8), %ymm0
-; AVX-NEXT: vmovaps 128(%r9), %ymm1
-; AVX-NEXT: vmovaps 128(%r10), %ymm3
-; AVX-NEXT: vmovaps 128(%rax), %ymm5
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm5[0],ymm3[0],ymm5[2],ymm3[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm2[2,0],ymm4[4,5],ymm2[6,4]
-; AVX-NEXT: vmovaps 128(%rdi), %ymm2
-; AVX-NEXT: vmovaps 128(%rsi), %ymm4
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm8
-; AVX-NEXT: vmovaps 128(%rdx), %ymm6
-; AVX-NEXT: vmovaps 128(%rcx), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm6[0],ymm9[2],ymm6[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm4[1,0],ymm2[1,0],ymm4[5,4],ymm2[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm3[1],ymm5[3],ymm3[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,0],ymm8[4,5],ymm7[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm9[1],ymm6[1],ymm9[3],ymm6[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm3 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,3],ymm0[6,4],ymm3[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm2[3,0],ymm4[7,4],ymm2[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 160(%r8), %ymm0
-; AVX-NEXT: vmovaps 160(%r9), %ymm1
-; AVX-NEXT: vmovaps 160(%r10), %ymm3
-; AVX-NEXT: vmovaps 160(%rax), %ymm5
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm5[0],ymm3[0],ymm5[2],ymm3[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm2[2,0],ymm4[4,5],ymm2[6,4]
-; AVX-NEXT: vmovaps 160(%rdi), %ymm2
-; AVX-NEXT: vmovaps 160(%rsi), %ymm4
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm8
-; AVX-NEXT: vmovaps 160(%rdx), %ymm6
-; AVX-NEXT: vmovaps 160(%rcx), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm6[0],ymm9[2],ymm6[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vmovaps 32(%r9), %xmm1
+; AVX-NEXT: vmovaps 32(%r8), %xmm2
+; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm3[0,1,0,1]
+; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5],ymm6[6,7]
+; AVX-NEXT: vmovaps 32(%rdx), %xmm0
+; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
+; AVX-NEXT: vmovaps 32(%rsi), %xmm9
+; AVX-NEXT: vmovaps 32(%rdi), %xmm10
+; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm11[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm10[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0],xmm9[1],xmm6[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm8[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm2[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],xmm1[1],xmm8[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm3[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm5[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm6
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm1[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm7[2,2,2,2]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm3[0,1],xmm6[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; AVX-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
+; AVX-NEXT: vmovaps 32(%r8), %ymm0
+; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX-NEXT: vmovaps 32(%r9), %ymm1
+; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX-NEXT: vmovaps 32(%r10), %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 32(%rax), %ymm3
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 32(%rdx), %ymm4
+; AVX-NEXT: vmovaps 32(%rcx), %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps 32(%rdi), %ymm8
+; AVX-NEXT: vmovaps 32(%rsi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm4[1,0],ymm2[1,0],ymm4[5,4],ymm2[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm3[1],ymm5[3],ymm3[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,0],ymm8[4,5],ymm7[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm9[1],ymm6[1],ymm9[3],ymm6[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm3 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,3],ymm0[6,4],ymm3[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm2[3,0],ymm4[7,4],ymm2[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 192(%r8), %ymm0
-; AVX-NEXT: vmovaps 192(%r9), %ymm1
-; AVX-NEXT: vmovaps 192(%r10), %ymm3
-; AVX-NEXT: vmovaps 192(%rax), %ymm5
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm5[0],ymm3[0],ymm5[2],ymm3[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm2[2,0],ymm4[4,5],ymm2[6,4]
-; AVX-NEXT: vmovaps 192(%rdi), %ymm2
-; AVX-NEXT: vmovaps 192(%rsi), %ymm4
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm8
-; AVX-NEXT: vmovaps 192(%rdx), %ymm6
-; AVX-NEXT: vmovaps 192(%rcx), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm6[0],ymm9[2],ymm6[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm4[1,0],ymm2[1,0],ymm4[5,4],ymm2[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm3[1],ymm5[3],ymm3[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,0],ymm8[4,5],ymm7[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm9[1],ymm6[1],ymm9[3],ymm6[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm3 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,3],ymm0[6,4],ymm3[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm2[3,0],ymm4[7,4],ymm2[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 224(%r8), %ymm0
-; AVX-NEXT: vmovaps 224(%r9), %ymm1
-; AVX-NEXT: vmovaps 224(%r10), %ymm3
-; AVX-NEXT: vmovaps 224(%rax), %ymm5
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm5[0],ymm3[0],ymm5[2],ymm3[2]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm2[2,0],ymm4[4,5],ymm2[6,4]
-; AVX-NEXT: vmovaps 224(%rdi), %ymm2
-; AVX-NEXT: vmovaps 224(%rsi), %ymm4
-; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm8
-; AVX-NEXT: vmovaps 224(%rdx), %ymm6
-; AVX-NEXT: vmovaps 224(%rcx), %ymm9
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm9[0],ymm6[0],ymm9[2],ymm6[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[4],ymm5[4],ymm3[5],ymm5[5]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[2,0],ymm7[2,3],ymm8[6,4],ymm7[6,7]
-; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm4[1,0],ymm2[1,0],ymm4[5,4],ymm2[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm3[1],ymm5[3],ymm3[3]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,0],ymm8[4,5],ymm7[6,4]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm8 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm8
-; AVX-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm9[1],ymm6[1],ymm9[3],ymm6[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0,1],xmm10[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} ymm3 = ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[6],ymm5[6],ymm3[7],ymm5[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,3],ymm0[6,4],ymm3[6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[3,0],ymm2[3,0],ymm4[7,4],ymm2[7,4]
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0,2,3,6,4,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps (%r9), %xmm2
-; AVX-NEXT: vmovaps (%r8), %xmm3
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm4
-; AVX-NEXT: vmovaps (%rax), %xmm0
-; AVX-NEXT: vmovaps (%r10), %xmm1
-; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm6[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm4[0,1,2,3,4,5],ymm5[6,7]
-; AVX-NEXT: vmovaps (%rcx), %xmm4
-; AVX-NEXT: vmovaps (%rdx), %xmm5
-; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX-NEXT: vmovaps (%rsi), %xmm9
-; AVX-NEXT: vmovaps (%rdi), %xmm10
-; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0],xmm9[1],xmm7[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm3[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm8[0],xmm2[1],xmm8[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm8
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3,4,5],ymm6[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} xmm6 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm0[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm1[0,1,2],xmm3[3]
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm7
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm4[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm5[0,1,2],xmm7[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm7 = xmm6[0,1],xmm7[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm3[1]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%r9), %xmm4
-; AVX-NEXT: vmovaps 32(%r8), %xmm5
+; AVX-NEXT: vmovaps 64(%r9), %xmm4
+; AVX-NEXT: vmovaps 64(%r8), %xmm5
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT: vmovaps 32(%rax), %xmm0
-; AVX-NEXT: vmovaps 32(%r10), %xmm1
+; AVX-NEXT: vmovaps 64(%rax), %xmm0
+; AVX-NEXT: vmovaps 64(%r10), %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovaps 32(%rcx), %xmm2
-; AVX-NEXT: vmovaps 32(%rdx), %xmm3
+; AVX-NEXT: vmovaps 64(%rcx), %xmm2
+; AVX-NEXT: vmovaps 64(%rdx), %xmm3
; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vmovaps 32(%rsi), %xmm9
-; AVX-NEXT: vmovaps 32(%rdi), %xmm10
+; AVX-NEXT: vmovaps 64(%rsi), %xmm9
+; AVX-NEXT: vmovaps 64(%rdi), %xmm10
; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
@@ -10245,21 +9968,69 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 64(%r9), %xmm4
-; AVX-NEXT: vmovaps 64(%r8), %xmm5
+; AVX-NEXT: vmovaps 64(%r8), %ymm0
+; AVX-NEXT: vmovaps 64(%r9), %ymm1
+; AVX-NEXT: vmovaps 64(%r10), %ymm2
+; AVX-NEXT: vmovaps 64(%rax), %ymm3
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 64(%rdx), %ymm4
+; AVX-NEXT: vmovaps 64(%rcx), %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps 64(%rdi), %ymm8
+; AVX-NEXT: vmovaps 64(%rsi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 96(%r9), %xmm4
+; AVX-NEXT: vmovaps 96(%r8), %xmm5
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT: vmovaps 64(%rax), %xmm0
-; AVX-NEXT: vmovaps 64(%r10), %xmm1
+; AVX-NEXT: vmovaps 96(%rax), %xmm0
+; AVX-NEXT: vmovaps 96(%r10), %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovaps 64(%rcx), %xmm2
-; AVX-NEXT: vmovaps 64(%rdx), %xmm3
+; AVX-NEXT: vmovaps 96(%rcx), %xmm2
+; AVX-NEXT: vmovaps 96(%rdx), %xmm3
; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vmovaps 64(%rsi), %xmm9
-; AVX-NEXT: vmovaps 64(%rdi), %xmm10
+; AVX-NEXT: vmovaps 96(%rsi), %xmm9
+; AVX-NEXT: vmovaps 96(%rdi), %xmm10
; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
@@ -10295,21 +10066,69 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%r9), %xmm4
-; AVX-NEXT: vmovaps 96(%r8), %xmm5
+; AVX-NEXT: vmovaps 96(%r8), %ymm0
+; AVX-NEXT: vmovaps 96(%r9), %ymm1
+; AVX-NEXT: vmovaps 96(%r10), %ymm2
+; AVX-NEXT: vmovaps 96(%rax), %ymm3
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 96(%rdx), %ymm4
+; AVX-NEXT: vmovaps 96(%rcx), %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps 96(%rdi), %ymm8
+; AVX-NEXT: vmovaps 96(%rsi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 128(%r9), %xmm4
+; AVX-NEXT: vmovaps 128(%r8), %xmm5
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT: vmovaps 96(%rax), %xmm0
-; AVX-NEXT: vmovaps 96(%r10), %xmm1
+; AVX-NEXT: vmovaps 128(%rax), %xmm0
+; AVX-NEXT: vmovaps 128(%r10), %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovaps 96(%rcx), %xmm2
-; AVX-NEXT: vmovaps 96(%rdx), %xmm3
+; AVX-NEXT: vmovaps 128(%rcx), %xmm2
+; AVX-NEXT: vmovaps 128(%rdx), %xmm3
; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vmovaps 96(%rsi), %xmm9
-; AVX-NEXT: vmovaps 96(%rdi), %xmm10
+; AVX-NEXT: vmovaps 128(%rsi), %xmm9
+; AVX-NEXT: vmovaps 128(%rdi), %xmm10
; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
@@ -10345,21 +10164,69 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 128(%r9), %xmm4
-; AVX-NEXT: vmovaps 128(%r8), %xmm5
+; AVX-NEXT: vmovaps 128(%r8), %ymm0
+; AVX-NEXT: vmovaps 128(%r9), %ymm1
+; AVX-NEXT: vmovaps 128(%r10), %ymm2
+; AVX-NEXT: vmovaps 128(%rax), %ymm3
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 128(%rdx), %ymm4
+; AVX-NEXT: vmovaps 128(%rcx), %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps 128(%rdi), %ymm8
+; AVX-NEXT: vmovaps 128(%rsi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 160(%r9), %xmm4
+; AVX-NEXT: vmovaps 160(%r8), %xmm5
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT: vmovaps 128(%rax), %xmm0
-; AVX-NEXT: vmovaps 128(%r10), %xmm1
+; AVX-NEXT: vmovaps 160(%rax), %xmm0
+; AVX-NEXT: vmovaps 160(%r10), %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovaps 128(%rcx), %xmm2
-; AVX-NEXT: vmovaps 128(%rdx), %xmm3
+; AVX-NEXT: vmovaps 160(%rcx), %xmm2
+; AVX-NEXT: vmovaps 160(%rdx), %xmm3
; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vmovaps 128(%rsi), %xmm9
-; AVX-NEXT: vmovaps 128(%rdi), %xmm10
+; AVX-NEXT: vmovaps 160(%rsi), %xmm9
+; AVX-NEXT: vmovaps 160(%rdi), %xmm10
; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
@@ -10394,22 +10261,70 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovaps 160(%r9), %xmm4
-; AVX-NEXT: vmovaps 160(%r8), %xmm5
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 160(%r8), %ymm0
+; AVX-NEXT: vmovaps 160(%r9), %ymm1
+; AVX-NEXT: vmovaps 160(%r10), %ymm2
+; AVX-NEXT: vmovaps 160(%rax), %ymm3
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 160(%rdx), %ymm4
+; AVX-NEXT: vmovaps 160(%rcx), %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps 160(%rdi), %ymm8
+; AVX-NEXT: vmovaps 160(%rsi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 192(%r9), %xmm4
+; AVX-NEXT: vmovaps 192(%r8), %xmm5
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT: vmovaps 160(%rax), %xmm0
-; AVX-NEXT: vmovaps 160(%r10), %xmm1
+; AVX-NEXT: vmovaps 192(%rax), %xmm0
+; AVX-NEXT: vmovaps 192(%r10), %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovaps 160(%rcx), %xmm2
-; AVX-NEXT: vmovaps 160(%rdx), %xmm3
+; AVX-NEXT: vmovaps 192(%rcx), %xmm2
+; AVX-NEXT: vmovaps 192(%rdx), %xmm3
; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vmovaps 160(%rsi), %xmm9
-; AVX-NEXT: vmovaps 160(%rdi), %xmm10
+; AVX-NEXT: vmovaps 192(%rsi), %xmm9
+; AVX-NEXT: vmovaps 192(%rdi), %xmm10
; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5,6,7]
@@ -10445,32 +10360,81 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 192(%r9), %xmm4
-; AVX-NEXT: vmovaps 192(%r8), %xmm5
+; AVX-NEXT: vmovaps 192(%r8), %ymm0
+; AVX-NEXT: vmovaps 192(%r9), %ymm1
+; AVX-NEXT: vmovaps 192(%r10), %ymm2
+; AVX-NEXT: vmovaps 192(%rax), %ymm3
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[0,1],ymm4[2,0],ymm5[4,5],ymm4[6,4]
+; AVX-NEXT: vmovaps 192(%rdx), %ymm4
+; AVX-NEXT: vmovaps 192(%rcx), %ymm5
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vmovaps 192(%rdi), %ymm8
+; AVX-NEXT: vmovaps 192(%rsi), %ymm9
+; AVX-NEXT: vunpcklps {{.*#+}} ymm10 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, (%rsp) # 32-byte Spill
+; AVX-NEXT: vunpcklps {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[1,0],ymm0[1,0],ymm1[5,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[2,0],xmm7[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,0],ymm7[4,5],ymm6[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT: vunpckhps {{.*#+}} ymm10 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm10
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,1],xmm7[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[3,0],ymm0[3,0],ymm1[7,4],ymm0[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,3],ymm0[6,4],ymm2[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm1 = ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[6],ymm5[6],ymm4[7],ymm5[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,0],ymm8[3,0],ymm9[7,4],ymm8[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 224(%r9), %xmm4
+; AVX-NEXT: vmovaps 224(%r8), %xmm5
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX-NEXT: vmovaps 192(%rax), %xmm9
-; AVX-NEXT: vmovaps 192(%r10), %xmm1
+; AVX-NEXT: vmovaps 224(%rax), %xmm9
+; AVX-NEXT: vmovaps 224(%r10), %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm6[0,1,0,1]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovaps 192(%rcx), %xmm2
-; AVX-NEXT: vmovaps 192(%rdx), %xmm3
+; AVX-NEXT: vmovaps 224(%rcx), %xmm2
+; AVX-NEXT: vmovaps 224(%rdx), %xmm3
; AVX-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX-NEXT: vmovaps 192(%rsi), %xmm0
-; AVX-NEXT: vmovaps 192(%rdi), %xmm7
-; AVX-NEXT: vunpcklps {{.*#+}} xmm15 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm15 = xmm15[0],xmm8[0]
-; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm10[4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm7[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm15[0,1],xmm8[2,3]
+; AVX-NEXT: vmovaps 224(%rsi), %xmm0
+; AVX-NEXT: vmovaps 224(%rdi), %xmm7
+; AVX-NEXT: vunpcklps {{.*#+}} xmm11 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
+; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm8[0]
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
+; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm7[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm0[1],xmm10[2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm8 = xmm10[0,1],xmm8[2,3]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm5[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0],xmm4[1],xmm15[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm15[0,1,2,3,4,5],ymm6[6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm5[1,1,1,1]
+; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
+; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm10[0,1,2,3,4,5],ymm6[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm8[0,1,2,3],ymm6[4,5,6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm7[2],xmm0[2],xmm7[3],xmm0[3]
; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
@@ -10491,126 +10455,77 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX-NEXT: vmovaps 224(%r9), %xmm3
-; AVX-NEXT: vmovaps 224(%r8), %xmm9
-; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm9[0],xmm3[0],xmm9[1],xmm3[1]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX-NEXT: vmovaps 224(%rax), %xmm7
-; AVX-NEXT: vmovaps 224(%r10), %xmm6
-; AVX-NEXT: vunpcklps {{.*#+}} xmm15 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm15[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX-NEXT: vmovaps 224(%rcx), %xmm5
-; AVX-NEXT: vmovaps 224(%rdx), %xmm4
-; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX-NEXT: vmovaps 224(%rsi), %xmm1
-; AVX-NEXT: vmovaps 224(%rdi), %xmm0
-; AVX-NEXT: vunpcklps {{.*#+}} xmm14 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT: vmovlhps {{.*#+}} xmm14 = xmm14[0],xmm2[0]
+; AVX-NEXT: vmovaps 224(%r8), %ymm7
+; AVX-NEXT: vmovaps 224(%r9), %ymm6
+; AVX-NEXT: vmovaps 224(%r10), %ymm5
+; AVX-NEXT: vmovaps 224(%rax), %ymm4
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[4],ymm6[4],ymm7[5],ymm6[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm1[0,1],ymm0[2,0],ymm1[4,5],ymm0[6,4]
+; AVX-NEXT: vmovaps 224(%rdx), %ymm3
+; AVX-NEXT: vmovaps 224(%rcx), %ymm2
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX-NEXT: vextractf128 $1, %ymm9, %xmm9
+; AVX-NEXT: vmovaps 224(%rdi), %ymm1
+; AVX-NEXT: vmovaps 224(%rsi), %ymm0
+; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm15[0,1],xmm9[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm8 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[4],ymm4[4],ymm5[5],ymm4[5]
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm6[1,0],ymm7[1,0],ymm6[5,4],ymm7[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm15[2,0],ymm8[2,3],ymm15[6,4],ymm8[6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm15 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[1,0],ymm1[1,0],ymm0[5,4],ymm1[5,4]
+; AVX-NEXT: vextractf128 $1, %ymm14, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[2,0],xmm15[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm0[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm14[0],xmm1[1],xmm14[2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm14[0,1],xmm2[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm14
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm9[1,1,1,1]
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm15[0],xmm3[1],xmm15[2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm15
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm9[2],xmm3[2],xmm9[3],xmm3[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm7[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3]
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm9
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm5[2,2,2,2]
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm4[0,1,2],xmm9[3]
-; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm0[0,1],xmm9[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3],ymm3[4,5,6,7]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm14 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm15 = ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[6],ymm6[6],ymm7[7],ymm6[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,0],ymm15[4,5],ymm14[6,4]
+; AVX-NEXT: vunpckhpd {{.*#+}} ymm15 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX-NEXT: vextractf128 $1, %ymm15, %xmm15
+; AVX-NEXT: vunpckhps {{.*#+}} ymm13 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
+; AVX-NEXT: vextractf128 $1, %ymm13, %xmm13
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[0,1],xmm15[2,0]
+; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[6],ymm4[6],ymm5[7],ymm4[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[3,0],ymm7[3,0],ymm6[7,4],ymm7[7,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm5[2,0],ymm4[2,3],ymm5[6,4],ymm4[6,7]
+; AVX-NEXT: vunpckhps {{.*#+}} ymm2 = ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[6],ymm2[6],ymm3[7],ymm2[7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,0],ymm1[3,0],ymm0[7,4],ymm1[7,4]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX-NEXT: vmovaps %ymm0, 1888(%rax)
-; AVX-NEXT: vmovaps %ymm3, 1856(%rax)
-; AVX-NEXT: vmovaps %ymm2, 1824(%rax)
-; AVX-NEXT: vmovaps %ymm8, 1792(%rax)
-; AVX-NEXT: vmovaps %ymm10, 1632(%rax)
-; AVX-NEXT: vmovaps %ymm11, 1600(%rax)
-; AVX-NEXT: vmovaps %ymm12, 1568(%rax)
-; AVX-NEXT: vmovaps %ymm13, 1536(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1376(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1344(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1312(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1280(%rax)
-; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1120(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1088(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1056(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1024(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 864(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 832(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 800(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 768(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 608(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 576(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 544(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 512(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 352(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 320(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 288(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 256(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 96(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 64(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 32(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, (%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 2016(%rax)
+; AVX-NEXT: vmovaps %ymm13, 1984(%rax)
+; AVX-NEXT: vmovaps %ymm8, 1952(%rax)
+; AVX-NEXT: vmovaps %ymm9, 1920(%rax)
+; AVX-NEXT: vmovaps %ymm10, 1888(%rax)
+; AVX-NEXT: vmovaps %ymm11, 1856(%rax)
+; AVX-NEXT: vmovaps %ymm12, 1824(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1984(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1952(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps %ymm0, 1920(%rax)
+; AVX-NEXT: vmovaps %ymm0, 1792(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1760(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1728(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1696(%rax)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1664(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1632(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1600(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1568(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1536(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1504(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1472(%rax)
@@ -10619,6 +10534,14 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1408(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1376(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1344(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1312(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1280(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1248(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1216(%rax)
@@ -10627,6 +10550,14 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 1152(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1120(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1088(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1056(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 1024(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 992(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 960(%rax)
@@ -10635,6 +10566,14 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 896(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 864(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 832(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 800(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 768(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 736(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 704(%rax)
@@ -10643,6 +10582,14 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 640(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 608(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 576(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 544(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 512(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 480(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 448(%rax)
@@ -10651,6 +10598,14 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 384(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 352(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 320(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 288(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 256(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 224(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 192(%rax)
@@ -10658,7 +10613,15 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX-NEXT: vmovaps %ymm0, 160(%rax)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 128(%rax)
-; AVX-NEXT: addq $1672, %rsp # imm = 0x688
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 96(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 64(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, 32(%rax)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm0, (%rax)
+; AVX-NEXT: addq $1704, %rsp # imm = 0x6A8
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;