[llvm] 61aab82 - [X86] getFauxShuffleMask - insert_subvector - skip undemanded subvectors (#129042)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 27 07:25:07 PST 2025
Author: Simon Pilgrim
Date: 2025-02-27T15:25:02Z
New Revision: 61aab82135db3e5e69a660f395c2008e812b8946
URL: https://github.com/llvm/llvm-project/commit/61aab82135db3e5e69a660f395c2008e812b8946
DIFF: https://github.com/llvm/llvm-project/commit/61aab82135db3e5e69a660f395c2008e812b8946.diff
LOG: [X86] getFauxShuffleMask - insert_subvector - skip undemanded subvectors (#129042)
If the shuffle combine doesn't require the subvector of an insert_subvector node, we can just combine the base vector directly.
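As a minimal standalone sketch of the idea (not the in-tree getFauxShuffleMask helper - the function name, the std::vector mask type and the plain -1 sentinel here are illustrative only), the check added by this patch boils down to: extract the demanded-element bits covering the inserted subvector, and if they are all zero, model the insert_subvector as an identity shuffle of its base operand.

    #include <numeric>
    #include <vector>
    #include "llvm/ADT/APInt.h"

    // Sketch: returns true and fills Mask with the identity permutation when no
    // demanded lane falls inside the subvector inserted at
    // [InsertIdx, InsertIdx + NumSubElts).
    static bool skipsUndemandedSubvector(const llvm::APInt &DemandedElts,
                                         unsigned NumElts, unsigned NumSubElts,
                                         unsigned InsertIdx,
                                         std::vector<int> &Mask) {
      // Bits InsertIdx..InsertIdx+NumSubElts-1 of DemandedElts cover the
      // inserted subvector; if they are all zero, the subvector is never read.
      if (DemandedElts.extractBits(NumSubElts, InsertIdx) != 0)
        return false;
      // The node then acts as an identity shuffle of the base vector: lane i
      // simply reads base lane i.
      Mask.resize(NumElts);
      std::iota(Mask.begin(), Mask.end(), 0);
      return true;
    }

For example, with NumElts = 8, NumSubElts = 4, InsertIdx = 4 and DemandedElts = 0x0F (only the low four lanes demanded), the extracted bits are zero, so the whole insert_subvector folds to its base operand for the purposes of the shuffle combine.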
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/avx-insertelt.ll
llvm/test/CodeGen/X86/avx512-insert-extract.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
llvm/test/CodeGen/X86/vector-pack-512.ll
llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 63d66c66d94d2..12636f22d8409 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6145,6 +6145,13 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
EVT SubVT = Sub.getValueType();
unsigned NumSubElts = SubVT.getVectorNumElements();
uint64_t InsertIdx = N.getConstantOperandVal(2);
+ // Subvector isn't demanded - just return the base vector.
+ if (DemandedElts.extractBits(NumSubElts, InsertIdx) == 0) {
+ Mask.resize(NumElts, SM_SentinelUndef);
+ std::iota(Mask.begin(), Mask.end(), 0);
+ Ops.push_back(Src);
+ return true;
+ }
// Handle CONCAT(SUB0, SUB1).
// Limit this to vXi64 vector cases to make the most of cross lane shuffles.
if (Depth > 0 && InsertIdx == NumSubElts && NumElts == (2 * NumSubElts) &&
diff --git a/llvm/test/CodeGen/X86/avx-insertelt.ll b/llvm/test/CodeGen/X86/avx-insertelt.ll
index d3ac3f1f64c83..18ca01290c914 100644
--- a/llvm/test/CodeGen/X86/avx-insertelt.ll
+++ b/llvm/test/CodeGen/X86/avx-insertelt.ll
@@ -270,11 +270,10 @@ define <16 x i16> @insert_i16_firstelts(<16 x i16> %x, i16 %s) {
; AVX2-LABEL: insert_i16_firstelts:
; AVX2: # %bb.0:
; AVX2-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vmovd %edi, %xmm1
-; AVX2-NEXT: vpbroadcastw %xmm1, %ymm1
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
%i0 = insertelement <16 x i16> %x, i16 %s, i32 0
%i1 = insertelement <16 x i16> %i0, i16 %s, i32 8
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 7ce37c637a79c..31e4f7e350a05 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -693,20 +693,18 @@ define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, ptr %ptr) nounwind {
; KNL-LABEL: insert_v16i16:
; KNL: ## %bb.0:
; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
+; KNL-NEXT: vmovd %edi, %xmm2
+; KNL-NEXT: vpbroadcastw %xmm2, %ymm2
+; KNL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15]
; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; KNL-NEXT: vmovd %edi, %xmm1
-; KNL-NEXT: vpbroadcastw %xmm1, %ymm1
-; KNL-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; KNL-NEXT: retq
;
; SKX-LABEL: insert_v16i16:
; SKX: ## %bb.0:
; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vpbroadcastw %edi, %ymm2
+; SKX-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15]
; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; SKX-NEXT: vpbroadcastw %edi, %ymm1
-; SKX-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
-; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; SKX-NEXT: retq
%val = load i16, ptr %ptr
%r1 = insertelement <16 x i16> %x, i16 %val, i32 1
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index d83a61e18d1ab..85ed61811af53 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -985,27 +985,27 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: vmovapd 160(%rdi), %ymm3
; AVX-NEXT: vmovapd 128(%rdi), %ymm4
-; AVX-NEXT: vmovaps 32(%rdi), %ymm6
-; AVX-NEXT: vmovaps (%rdi), %ymm7
; AVX-NEXT: vmovaps 96(%rdi), %ymm0
; AVX-NEXT: vmovaps 64(%rdi), %ymm1
-; AVX-NEXT: vinsertf128 $1, 96(%rdi), %ymm1, %ymm5
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[2,0],ymm5[0,0],ymm0[6,4],ymm5[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm5[2,2],ymm2[6,4],ymm5[6,6]
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
-; AVX-NEXT: vextractf128 $1, %ymm8, %xmm9
-; AVX-NEXT: vblendps {{.*#+}} xmm10 = xmm8[0,1],xmm9[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,2],xmm9[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1,2],ymm2[3,4,5,6,7]
+; AVX-NEXT: vmovaps 32(%rdi), %ymm6
+; AVX-NEXT: vmovaps (%rdi), %ymm7
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm5, %xmm8
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0,1],xmm8[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm8[0,3]
+; AVX-NEXT: vinsertf128 $1, 96(%rdi), %ymm1, %ymm9
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[2,0],ymm9[0,0],ymm0[6,4],ymm9[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0],ymm1[2,2],ymm10[6,4],ymm1[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm10[3,4,5,6,7]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm4[2,3],ymm3[0,1]
; AVX-NEXT: vshufpd {{.*#+}} ymm11 = ymm10[0],ymm4[1],ymm10[3],ymm4[2]
; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm11[6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm0[3,0],ymm5[1,0],ymm0[7,4],ymm5[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm11[2,0],ymm5[2,3],ymm11[6,4],ymm5[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[1,0],xmm9[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[0,2],xmm9[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,0],xmm8[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,2],xmm8[1,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[3,0],ymm9[1,0],ymm0[7,4],ymm9[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0],ymm1[2,3],ymm8[6,4],ymm1[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm8[3,4,5,6,7]
; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm10[3,1],ymm4[1,3],ymm10[7,5],ymm4[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm8[6,7]
@@ -1953,162 +1953,161 @@ define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX-LABEL: load_i32_stride6_vf16:
; AVX: # %bb.0:
-; AVX-NEXT: subq $328, %rsp # imm = 0x148
-; AVX-NEXT: vmovaps 224(%rdi), %ymm12
-; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 288(%rdi), %ymm10
-; AVX-NEXT: vmovaps 256(%rdi), %ymm4
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovapd 160(%rdi), %ymm1
-; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovapd 128(%rdi), %ymm13
-; AVX-NEXT: vmovaps 32(%rdi), %ymm6
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps (%rdi), %ymm14
+; AVX-NEXT: subq $264, %rsp # imm = 0x108
+; AVX-NEXT: vmovaps 224(%rdi), %ymm5
+; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 192(%rdi), %ymm10
+; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovapd 160(%rdi), %ymm0
+; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovapd 128(%rdi), %ymm7
; AVX-NEXT: vmovaps 96(%rdi), %ymm9
-; AVX-NEXT: vmovaps 64(%rdi), %ymm0
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 96(%rdi), %ymm0, %ymm2
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm9[2,0],ymm2[0,0],ymm9[6,4],ymm2[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm2[2,2],ymm5[6,4],ymm2[6,6]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm14[0,1,2,3],ymm6[4,5],ymm14[6,7]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm7
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm6[0,1],xmm7[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,2],xmm7[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm11[0,1,2],ymm5[3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm13[2,3],ymm1[0,1]
-; AVX-NEXT: vshufpd {{.*#+}} ymm11 = ymm8[0],ymm13[1],ymm8[3],ymm13[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm11[6,7]
+; AVX-NEXT: vmovaps 64(%rdi), %ymm15
+; AVX-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps (%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm2[0,1],xmm6[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,3]
+; AVX-NEXT: vinsertf128 $1, 96(%rdi), %ymm15, %ymm8
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm9[2,0],ymm8[0,0],ymm9[6,4],ymm8[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0],ymm15[2,2],ymm4[6,4],ymm15[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5,6,7]
+; AVX-NEXT: vmovapd %ymm7, %ymm1
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm7[2,3],ymm0[0,1]
+; AVX-NEXT: vshufpd {{.*#+}} ymm4 = ymm7[0],ymm1[1],ymm7[3],ymm1[2]
+; AVX-NEXT: vmovapd %ymm1, %ymm13
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm4[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 288(%rdi), %ymm4, %ymm1
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm10[2,0],ymm1[0,0],ymm10[6,4],ymm1[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[2,0],ymm1[2,2],ymm5[6,4],ymm1[6,6]
-; AVX-NEXT: vmovaps 192(%rdi), %ymm15
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0,1,2,3],ymm12[4,5],ymm15[6,7]
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm0
-; AVX-NEXT: vblendps {{.*#+}} xmm11 = xmm3[0,1],xmm0[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,2],xmm0[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm11[0,1,2],ymm5[3,4,5,6,7]
-; AVX-NEXT: vmovapd 352(%rdi), %ymm4
-; AVX-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm5[4,5],ymm10[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm1[0,1],xmm0[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm3[0,2],xmm0[0,3]
+; AVX-NEXT: vmovaps 288(%rdi), %ymm11
+; AVX-NEXT: vmovaps 256(%rdi), %ymm10
+; AVX-NEXT: vinsertf128 $1, 288(%rdi), %ymm10, %ymm5
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm11[2,0],ymm5[0,0],ymm11[6,4],ymm5[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm10[2,2],ymm14[6,4],ymm10[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm12[0,1,2],ymm14[3,4,5,6,7]
+; AVX-NEXT: vmovapd 352(%rdi), %ymm3
+; AVX-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 320(%rdi), %ymm12
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm12[2,3],ymm4[0,1]
-; AVX-NEXT: vshufpd {{.*#+}} ymm11 = ymm4[0],ymm12[1],ymm4[3],ymm12[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm11[6,7]
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups %ymm9, (%rsp) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm9[3,0],ymm2[1,0],ymm9[7,4],ymm2[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm11[2,0],ymm2[2,3],ymm11[6,4],ymm2[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[1,0],xmm7[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2],ymm2[3,4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm8[3,1],ymm13[1,3],ymm8[7,5],ymm13[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm6[6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm12[2,3],ymm3[0,1]
+; AVX-NEXT: vshufpd {{.*#+}} ymm14 = ymm3[0],ymm12[1],ymm3[3],ymm12[2]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm14[6,7]
+; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm6[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[1,3]
+; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm9[3,0],ymm8[1,0],ymm9[7,4],ymm8[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0],ymm15[2,3],ymm4[6,4],ymm15[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3,4,5,6,7]
+; AVX-NEXT: vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[3,1],ymm13[1,3],ymm7[7,5],ymm13[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm10[3,0],ymm1[1,0],ymm10[7,4],ymm1[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[2,3],ymm2[6,4],ymm1[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm3[1,0],xmm0[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[1,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[3,0],ymm5[1,0],ymm11[7,4],ymm5[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm10[2,3],ymm1[6,4],ymm10[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm4[3,1],ymm12[1,3],ymm4[7,5],ymm12[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[3,1],ymm12[1,3],ymm3[7,5],ymm12[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm2 # 32-byte Folded Reload
-; AVX-NEXT: # ymm2 = mem[0,1],ymm14[2,3],mem[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm9[2,1],ymm7[2,0],ymm9[6,5],ymm7[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vblendps $12, (%rsp), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm9[2,1],ymm15[2,0],ymm9[6,5],ymm15[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm2[2,0],xmm5[2,3]
+; AVX-NEXT: vextractf128 $1, %ymm2, %xmm7
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm2[2,0],xmm7[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm13[0,1,2,3],ymm9[4,5],ymm13[6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm4[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm8[0,0],ymm4[2,0],ymm8[4,4],ymm4[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm3[5,6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT: # ymm11 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm10[2,1],ymm3[2,0],ymm10[6,5],ymm3[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm13[0,1,2,3],ymm9[4,5],ymm13[6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm4[0,0],ymm3[2,0],ymm4[4,4],ymm3[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm5[5,6,7]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
+; AVX-NEXT: # ymm8 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[2,1],ymm10[2,0],ymm11[6,5],ymm10[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
-; AVX-NEXT: vextractf128 $1, %ymm11, %xmm15
-; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm11[2,0],xmm15[2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm14[0,1,2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm12[0,1,2,3],ymm10[4,5],ymm12[6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm14[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm0[0,0],ymm14[2,0],ymm0[4,4],ymm14[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm6[5,6,7]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm14
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm8[2,0],xmm14[2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm6[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm0[0,0],ymm6[2,0],ymm0[4,4],ymm6[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm5[5,6,7]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[0,1],ymm4[3,1],ymm8[4,5],ymm4[7,5]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,1],xmm5[3,3]
-; AVX-NEXT: vmovaps %ymm7, %ymm1
-; AVX-NEXT: vmovups (%rsp), %ymm7 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm7[3,1],ymm1[2,1],ymm7[7,5],ymm1[6,5]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,0,1]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[0,1],ymm3[3,1],ymm4[4,5],ymm3[7,5]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,1],xmm7[3,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm1[3,1],ymm15[2,1],ymm1[7,5],ymm15[6,5]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,0,1]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm14[3,1],ymm0[4,5],ymm14[7,5]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm11[3,1],xmm15[3,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm4[3,1],ymm3[2,1],ymm4[7,5],ymm3[6,5]
-; AVX-NEXT: vmovaps %ymm3, %ymm15
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,0,1]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm6[3,1],ymm0[4,5],ymm6[7,5]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm8[3,1],xmm14[3,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[3,1],ymm10[2,1],ymm11[7,5],ymm10[6,5]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,0,1]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm13[2,3],ymm9[4,5,6,7]
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX-NEXT: # ymm9 = ymm9[0,1],mem[2,3],ymm9[4,5,6,7]
; AVX-NEXT: vmovaps 32(%rdi), %xmm3
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm3[2,2,3,3]
; AVX-NEXT: vmovaps 16(%rdi), %xmm5
; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3]
; AVX-NEXT: vmovapd 80(%rdi), %xmm6
-; AVX-NEXT: vshufpd {{.*#+}} ymm8 = ymm6[1],ymm1[0],ymm6[2],ymm1[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm7[0,1],ymm8[2,0],ymm7[4,5],ymm8[6,4]
-; AVX-NEXT: vmovaps %ymm7, %ymm13
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm8[2,3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm9[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm9[2,0],ymm8[0,0],ymm9[6,4],ymm8[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm8[0,2],ymm11[2,0],ymm8[4,6],ymm11[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm11[5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm10[0,1],ymm12[2,3],ymm10[4,5,6,7]
+; AVX-NEXT: vshufpd {{.*#+}} ymm7 = ymm6[1],ymm15[0],ymm6[2],ymm15[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm1[0,1],ymm7[2,0],ymm1[4,5],ymm7[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm7[2,3,4,5,6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm9[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm9[2,0],ymm7[0,0],ymm9[6,4],ymm7[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm7[0,2],ymm8[2,0],ymm7[4,6],ymm8[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm8[5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7]
; AVX-NEXT: vmovaps 224(%rdi), %xmm12
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm12[2,2,3,3]
; AVX-NEXT: vmovaps 208(%rdi), %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm0[0],xmm14[1],xmm0[2,3]
; AVX-NEXT: vmovapd 272(%rdi), %xmm1
-; AVX-NEXT: vshufpd {{.*#+}} ymm7 = ymm1[1],ymm15[0],ymm1[2],ymm15[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[0,1],ymm7[2,0],ymm4[4,5],ymm7[6,4]
-; AVX-NEXT: vmovaps %ymm4, %ymm10
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm14[0,1],ymm7[2,3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm11[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[2,0],ymm14[0,0],ymm11[6,4],ymm14[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm14[0,2],ymm4[2,0],ymm14[4,6],ymm4[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3,4],ymm4[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm9[3,0],ymm8[1,0],ymm9[7,4],ymm8[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[0,3],ymm7[2,0],ymm8[4,7],ymm7[6,4]
+; AVX-NEXT: vshufpd {{.*#+}} ymm4 = ymm1[1],ymm10[0],ymm1[2],ymm10[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[0,1],ymm4[2,0],ymm11[4,5],ymm4[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm14[0,1],ymm4[2,3,4,5,6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm8[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm8[2,0],ymm14[0,0],ymm8[6,4],ymm14[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm14[0,2],ymm2[2,0],ymm14[4,6],ymm2[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm9[3,0],ymm7[1,0],ymm9[7,4],ymm7[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm7[0,3],ymm4[2,0],ymm7[4,7],ymm4[6,4]
; AVX-NEXT: vblendps {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = ymm6[3,1],mem[1,3],ymm6[7,5],mem[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm13[1,1],ymm5[2,0],ymm13[5,5],ymm5[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[3,1],ymm15[1,3],ymm6[7,5],ymm15[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm6[1,1],ymm5[2,0],ymm6[5,5],ymm5[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm7[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm11[3,0],ymm14[1,0],ymm11[7,4],ymm14[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm14[0,3],ymm5[2,0],ymm14[4,7],ymm5[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[3,0],ymm14[1,0],ymm8[7,4],ymm14[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm14[0,3],ymm4[2,0],ymm14[4,7],ymm4[6,4]
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm12[2,3]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,1],ymm15[1,3],ymm1[7,5],ymm15[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm10[1,1],ymm1[2,0],ymm10[5,5],ymm1[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,1],ymm10[1,3],ymm1[7,5],ymm10[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[1,1],ymm1[2,0],ymm11[5,5],ymm1[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, 32(%rsi)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
@@ -2119,18 +2118,19 @@ define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps %ymm1, (%rdx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, (%rcx)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, 32(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm1, (%r8)
-; AVX-NEXT: vmovaps %ymm4, 32(%r9)
-; AVX-NEXT: vmovaps %ymm2, (%r9)
+; AVX-NEXT: vmovaps %ymm2, 32(%r9)
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovaps %ymm1, (%r9)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: vmovaps %ymm0, 32(%rax)
; AVX-NEXT: vmovaps %ymm3, (%rax)
-; AVX-NEXT: addq $328, %rsp # imm = 0x148
+; AVX-NEXT: addq $264, %rsp # imm = 0x108
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
@@ -4000,213 +4000,218 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX-LABEL: load_i32_stride6_vf32:
; AVX: # %bb.0:
-; AVX-NEXT: subq $1032, %rsp # imm = 0x408
-; AVX-NEXT: vmovaps 416(%rdi), %ymm9
-; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 480(%rdi), %ymm4
-; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 448(%rdi), %ymm5
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovapd 160(%rdi), %ymm2
-; AVX-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovapd 128(%rdi), %ymm3
-; AVX-NEXT: vmovupd %ymm3, (%rsp) # 32-byte Spill
-; AVX-NEXT: vmovaps 32(%rdi), %ymm6
-; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps (%rdi), %ymm7
+; AVX-NEXT: subq $1048, %rsp # imm = 0x418
+; AVX-NEXT: vmovaps 416(%rdi), %ymm7
; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 64(%rdi), %ymm0
+; AVX-NEXT: vmovaps 384(%rdi), %ymm9
+; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovapd 160(%rdi), %ymm5
+; AVX-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovapd 128(%rdi), %ymm6
+; AVX-NEXT: vmovaps 96(%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 64(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 32(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 96(%rdi), %ymm0, %ymm8
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm8[0,0],ymm1[6,4],ymm8[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm8[2,2],ymm0[6,4],ymm8[6,6]
-; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
-; AVX-NEXT: vextractf128 $1, %ymm12, %xmm7
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm12[0,1],xmm7[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm2[0,1]
+; AVX-NEXT: vmovaps (%rdi), %ymm1
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm8, %xmm4
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm8[0,1],xmm4[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,3]
+; AVX-NEXT: vinsertf128 $1, 96(%rdi), %ymm3, %ymm1
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,0],ymm2[6,4],ymm1[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm3[2,2],ymm1[6,4],ymm3[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX-NEXT: vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm6[2,3],ymm5[0,1]
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[3],ymm3[2]
+; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[3],ymm6[2]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 480(%rdi), %ymm5, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm4[2,0],ymm6[0,0],ymm4[6,4],ymm6[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm6[2,2],ymm0[6,4],ymm6[6,6]
-; AVX-NEXT: vmovaps 384(%rdi), %ymm1
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm9[0,1,2,3],ymm7[4,5],ymm9[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm11, %xmm3
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm11[0,1],xmm3[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,3]
+; AVX-NEXT: vmovaps 480(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm9[4,5],ymm1[6,7]
-; AVX-NEXT: vextractf128 $1, %ymm13, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm5[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX-NEXT: vmovaps 448(%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vinsertf128 $1, 480(%rdi), %ymm2, %ymm9
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm9[0,0],ymm1[6,4],ymm9[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,2],ymm1[6,4],ymm2[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovapd 544(%rdi), %ymm1
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 512(%rdi), %ymm2
-; AVX-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovupd %ymm2, (%rsp) # 32-byte Spill
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[3],ymm2[2]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 288(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 256(%rdi), %ymm0
+; AVX-NEXT: vmovaps 224(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 288(%rdi), %ymm0, %ymm3
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm3[0,0],ymm1[6,4],ymm3[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,2],ymm0[6,4],ymm3[6,6]
-; AVX-NEXT: vmovaps 224(%rdi), %ymm1
+; AVX-NEXT: vmovaps 192(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 192(%rdi), %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm10[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,3]
+; AVX-NEXT: vmovaps 288(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX-NEXT: vextractf128 $1, %ymm11, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm11[0,1],xmm2[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovapd 352(%rdi), %ymm1
-; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovapd 320(%rdi), %ymm4
-; AVX-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm4[2,3],ymm1[0,1]
-; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[3],ymm4[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 672(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 640(%rdi), %ymm0
+; AVX-NEXT: vmovaps 256(%rdi), %ymm5
+; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vinsertf128 $1, 288(%rdi), %ymm5, %ymm6
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm6[0,0],ymm2[6,4],ymm6[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[2,0],ymm5[2,2],ymm2[6,4],ymm5[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
+; AVX-NEXT: vmovapd 352(%rdi), %ymm2
+; AVX-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovapd 320(%rdi), %ymm5
+; AVX-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm5[2,3],ymm2[0,1]
+; AVX-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[3],ymm5[2]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 672(%rdi), %ymm0, %ymm9
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm9[0,0],ymm1[6,4],ymm9[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm0[2,0],ymm9[2,2],ymm0[6,4],ymm9[6,6]
; AVX-NEXT: vmovaps 608(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 576(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
-; AVX-NEXT: vextractf128 $1, %ymm4, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm15 = xmm4[0,1],xmm1[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,2],xmm1[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm15[0,1,2],ymm10[3,4,5,6,7]
-; AVX-NEXT: vmovapd 736(%rdi), %ymm10
-; AVX-NEXT: vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovapd 704(%rdi), %ymm0
+; AVX-NEXT: vmovaps 576(%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm7, %xmm2
+; AVX-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[0,2],xmm2[0,3]
+; AVX-NEXT: vmovaps 672(%rdi), %ymm5
+; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 640(%rdi), %ymm0
+; AVX-NEXT: vinsertf128 $1, 672(%rdi), %ymm0, %ymm15
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm5[2,0],ymm15[0,0],ymm5[6,4],ymm15[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm0[2,2],ymm14[6,4],ymm0[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm6[0,1,2],ymm14[3,4,5,6,7]
+; AVX-NEXT: vmovapd 736(%rdi), %ymm0
; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm0[2,3],ymm10[0,1]
-; AVX-NEXT: vshufpd {{.*#+}} ymm14 = ymm10[0],ymm0[1],ymm10[3],ymm0[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5],ymm14[6,7]
-; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm15[3,0],ymm8[1,0],ymm15[7,4],ymm8[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm14[2,0],ymm8[2,3],ymm14[6,4],ymm8[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[1,0],xmm7[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm12[0,2],xmm7[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5,6,7]
-; AVX-NEXT: vmovups (%rsp), %ymm12 # 32-byte Reload
+; AVX-NEXT: vmovapd 704(%rdi), %ymm5
+; AVX-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm5[2,3],ymm0[0,1]
+; AVX-NEXT: vshufpd {{.*#+}} ymm13 = ymm12[0],ymm5[1],ymm12[3],ymm5[2]
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3,4,5],ymm13[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[1,0],xmm4[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm8[0,2],xmm4[1,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm8 # 32-byte Folded Reload
+; AVX-NEXT: # ymm8 = ymm13[3,0],mem[1,0],ymm13[7,4],mem[5,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0],ymm14[2,3],ymm8[6,4],ymm14[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm8[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[3,1],ymm12[1,3],ymm0[7,5],ymm12[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm0[3,1],ymm5[1,3],ymm0[7,5],ymm5[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm8[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm11[1,0],xmm3[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm4[0,2],xmm3[1,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[3,0],ymm6[1,0],ymm8[7,4],ymm6[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm13[1,0],xmm5[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm7[0,2],xmm5[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm8[3,0],ymm9[1,0],ymm8[7,4],ymm9[5,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,0],ymm6[2,3],ymm4[6,4],ymm6[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5,6,7]
+; AVX-NEXT: vmovups (%rsp), %ymm9 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm0[3,1],ymm14[1,3],ymm0[7,5],ymm14[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm0[3,1],ymm9[1,3],ymm0[7,5],ymm9[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm4[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm10[1,0],xmm1[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm3[0,2],xmm1[1,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm0[3,0],ymm3[1,0],ymm0[7,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm5[2,0],ymm3[2,3],ymm5[6,4],ymm3[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm11[1,0],xmm2[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm5[0,2],xmm2[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm0[3,0],mem[1,0],ymm0[7,4],mem[5,4]
+; AVX-NEXT: vshufps $226, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm3[2,0],mem[2,3],ymm3[6,4],mem[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
; AVX-NEXT: # ymm3 = ymm0[3,1],mem[1,3],ymm0[7,5],mem[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm3[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm7[1,0],xmm2[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm2[1,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm10[3,0],ymm15[1,0],ymm10[7,4],ymm15[5,4]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm11[3,0],ymm9[1,0],ymm11[7,4],ymm9[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[2,0],ymm9[2,3],ymm2[6,4],ymm9[6,7]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm4[1,0],xmm1[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm1[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm10[3,1],ymm13[1,3],ymm10[7,5],ymm13[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm11[2,3],ymm1[6,4],ymm11[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm12[3,1],ymm15[1,3],ymm12[7,5],ymm15[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
; AVX-NEXT: # ymm4 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm15[2,1],mem[2,0],ymm15[6,5],mem[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm13[2,1],ymm14[2,0],ymm13[6,5],ymm14[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm4, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,0],xmm0[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7]
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT: # ymm3 = ymm12[0,1,2,3],mem[4,5],ymm12[6,7]
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm3 # 32-byte Folded Reload
+; AVX-NEXT: # ymm3 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm0[0,0],ymm3[2,0],ymm0[4,4],ymm3[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm5[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX-NEXT: # ymm6 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = ymm8[2,1],mem[2,0],ymm8[6,5],mem[6,4]
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm8[2,1],ymm6[2,0],ymm8[6,5],ymm6[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3,0,1]
-; AVX-NEXT: vextractf128 $1, %ymm6, %xmm0
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm6[2,0],xmm0[2,3]
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm0[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3,4,5,6,7]
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm2 # 32-byte Folded Reload
-; AVX-NEXT: # ymm2 = ymm14[0,1,2,3],mem[4,5],ymm14[6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm0[0,0],ymm2[2,0],ymm0[4,4],ymm2[6,4]
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm9[0,1,2,3],mem[4,5],ymm9[6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm2[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm6[0,0],ymm2[2,0],ymm6[4,4],ymm2[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm7[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
-; AVX-NEXT: # ymm9 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm7 # 32-byte Folded Reload
-; AVX-NEXT: # ymm7 = ymm11[2,1],mem[2,0],ymm11[6,5],mem[6,4]
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX-NEXT: # ymm5 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm10[2,1],ymm11[2,0],ymm10[6,5],ymm11[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm7[2,3,0,1]
-; AVX-NEXT: vextractf128 $1, %ymm9, %xmm0
+; AVX-NEXT: vextractf128 $1, %ymm5, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm9[2,0],xmm0[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm5[2,0],xmm0[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm11[0,1,2],ymm10[3,4,5,6,7]
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm13[0,1,2,3],mem[4,5],ymm13[6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm10[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm7[0,0],ymm10[2,0],ymm7[4,4],ymm10[6,4]
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm10 # 32-byte Folded Reload
+; AVX-NEXT: # ymm10 = ymm15[0,1,2,3],mem[4,5],ymm15[6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm10[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm11[0,0],ymm10[2,0],ymm11[4,4],ymm10[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3,4],ymm13[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
; AVX-NEXT: # ymm12 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm5[2,1],ymm8[2,0],ymm5[6,5],ymm8[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm9[2,1],ymm8[2,0],ymm9[6,5],ymm8[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm13[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm12, %xmm14
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm12[2,0],xmm14[2,3]
@@ -4223,16 +4228,16 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm3 # 16-byte Folded Reload
; AVX-NEXT: # xmm3 = xmm4[3,1],mem[3,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm11[3,1],ymm13[2,1],ymm11[7,5],ymm13[6,5]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm13[3,1],ymm7[2,1],ymm13[7,5],ymm7[6,5]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,0,1]
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1],ymm2[3,1],ymm1[4,5],ymm2[7,5]
-; AVX-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm2 # 16-byte Folded Reload
-; AVX-NEXT: # xmm2 = xmm6[3,1],mem[3,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm6[0,1],ymm2[3,1],ymm6[4,5],ymm2[7,5]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX-NEXT: # xmm2 = xmm2[3,1],mem[3,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[3,1],ymm6[2,1],ymm4[7,5],ymm6[6,5]
@@ -4242,27 +4247,27 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm15[3,1],ymm0[4,5],ymm15[7,5]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm12[3,1],xmm14[3,3]
-; AVX-NEXT: vmovaps %ymm5, %ymm3
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm5[3,1],ymm8[2,1],ymm5[7,5],ymm8[6,5]
+; AVX-NEXT: vmovaps %ymm9, %ymm3
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm9[3,1],ymm8[2,1],ymm9[7,5],ymm8[6,5]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm7[0,1],ymm10[3,1],ymm7[4,5],ymm10[7,5]
-; AVX-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm1 # 16-byte Folded Reload
-; AVX-NEXT: # xmm1 = xmm9[3,1],mem[3,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[3,1],ymm12[2,1],ymm2[7,5],ymm12[6,5]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm11[0,1],ymm10[3,1],ymm11[4,5],ymm10[7,5]
+; AVX-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload
+; AVX-NEXT: # xmm1 = xmm5[3,1],mem[3,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm11[3,1],ymm15[2,1],ymm11[7,5],ymm15[6,5]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX-NEXT: # ymm2 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX-NEXT: vmovaps 416(%rdi), %xmm0
-; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX-NEXT: vmovaps 400(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -4273,104 +4278,106 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,0],ymm4[4,5],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm2[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm7[0,0],ymm2[6,4],ymm7[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,2],ymm1[2,0],ymm7[4,6],ymm1[6,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm2[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm9[0,0],ymm2[6,4],ymm9[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm9[0,2],ymm1[2,0],ymm9[4,6],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX-NEXT: # ymm6 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX-NEXT: # ymm4 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX-NEXT: vmovaps 32(%rdi), %xmm0
-; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX-NEXT: vmovaps 16(%rdi), %xmm15
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm15[0],xmm0[1],xmm15[2,3]
-; AVX-NEXT: vmovapd 80(%rdi), %xmm10
-; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm10[1],ymm13[0],ymm10[2],ymm13[3]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[0,1],ymm1[2,0],ymm11[4,5],ymm1[6,4]
+; AVX-NEXT: vmovaps 16(%rdi), %xmm14
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3]
+; AVX-NEXT: vmovapd 80(%rdi), %xmm5
+; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm5[1],ymm7[0],ymm5[2],ymm7[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm13[0,1],ymm1[2,0],ymm13[4,5],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm6[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm6[2,0],ymm1[0,0],ymm6[6,4],ymm1[4,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm4[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[2,0],ymm1[0,0],ymm4[6,4],ymm1[4,4]
; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm1[0,2],ymm2[2,0],ymm1[4,6],ymm2[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT: # ymm6 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX-NEXT: vmovaps 224(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX-NEXT: vmovaps 208(%rdi), %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm13[0],xmm0[1],xmm13[2,3]
+; AVX-NEXT: vmovaps 208(%rdi), %xmm2
+; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3]
; AVX-NEXT: vmovapd 272(%rdi), %xmm2
; AVX-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm2 = ymm2[1],ymm8[0],ymm2[2],ymm8[3]
; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,0],ymm3[4,5],ymm2[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm5[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm5[2,0],ymm2[0,0],ymm5[6,4],ymm2[4,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm6[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm6[2,0],ymm2[0,0],ymm6[6,4],ymm2[4,4]
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[0,2],ymm3[2,0],ymm2[4,6],ymm3[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT: # ymm3 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
-; AVX-NEXT: vmovaps 608(%rdi), %xmm11
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm11[2,2,3,3]
+; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
+; AVX-NEXT: # ymm12 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX-NEXT: vmovaps 608(%rdi), %xmm13
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm13[2,2,3,3]
; AVX-NEXT: vmovaps 592(%rdi), %xmm8
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3]
-; AVX-NEXT: vmovapd 656(%rdi), %xmm9
-; AVX-NEXT: vshufpd {{.*#+}} ymm14 = ymm9[1],ymm12[0],ymm9[2],ymm12[3]
+; AVX-NEXT: vmovapd 656(%rdi), %xmm10
+; AVX-NEXT: vmovaps %ymm15, %ymm3
+; AVX-NEXT: vshufpd {{.*#+}} ymm15 = ymm10[1],ymm15[0],ymm10[2],ymm15[3]
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm11[0,1],ymm15[2,0],ymm11[4,5],ymm15[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1],ymm15[2,3,4,5,6,7]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm12[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm12[2,0],ymm0[0,0],ymm12[6,4],ymm0[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm0[0,2],ymm11[2,0],ymm0[4,6],ymm11[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm15[0,1,2,3,4],ymm11[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm4[3,0],ymm1[1,0],ymm4[7,4],ymm1[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,3],ymm7[2,0],ymm1[4,7],ymm7[6,4]
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm7 # 16-byte Folded Reload
+; AVX-NEXT: # xmm7 = xmm14[0,1],mem[2,3]
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
+; AVX-NEXT: # ymm5 = ymm5[3,1],mem[1,3],ymm5[7,5],mem[5,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm4[0,1],ymm14[2,0],ymm4[4,5],ymm14[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm0[0,1],ymm14[2,3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm3[2,0],ymm0[0,0],ymm3[6,4],ymm0[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm0[0,2],ymm12[2,0],ymm0[4,6],ymm12[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm12[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm1[1,0],ymm6[7,4],ymm1[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,3],ymm6[2,0],ymm1[4,7],ymm6[6,4]
-; AVX-NEXT: vblendps $12, (%rsp), %xmm15, %xmm6 # 16-byte Folded Reload
-; AVX-NEXT: # xmm6 = xmm15[0,1],mem[2,3]
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[1,1],ymm10[2,0],ymm12[5,5],ymm10[6,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[1,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm10[2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm1[5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm7[1,0],ymm1[7,4],ymm7[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm7[0,3],ymm1[2,0],ymm7[4,7],ymm1[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
-; AVX-NEXT: # xmm7 = xmm7[0,1],mem[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[1,1],ymm10[2,0],ymm12[5,5],ymm10[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm4[1,1],ymm5[2,0],ymm4[5,5],ymm5[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[1,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm10[2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm1[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm5[3,0],ymm2[1,0],ymm5[7,4],ymm2[5,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm5[0,1,2,3,4],ymm1[5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[3,0],ymm9[1,0],ymm1[7,4],ymm9[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm9[0,3],ymm1[2,0],ymm9[4,7],ymm1[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX-NEXT: vblendps $12, (%rsp), %xmm4, %xmm5 # 16-byte Folded Reload
+; AVX-NEXT: # xmm5 = xmm4[0,1],mem[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm9 # 32-byte Folded Reload
+; AVX-NEXT: # ymm9 = ymm4[3,1],mem[1,3],ymm4[7,5],mem[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm4[1,1],ymm9[2,0],ymm4[5,5],ymm9[6,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[1,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm9[2,3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm1[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm6[3,0],ymm2[1,0],ymm6[7,4],ymm2[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,3],ymm1[2,0],ymm2[4,7],ymm1[6,4]
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm2 # 16-byte Folded Reload
-; AVX-NEXT: # xmm2 = xmm13[0,1],mem[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX-NEXT: # ymm5 = ymm5[3,1],mem[1,3],ymm5[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm10[1,1],ymm5[2,0],ymm10[5,5],ymm5[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX-NEXT: # xmm2 = xmm2[0,1],mem[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT: # ymm6 = ymm4[3,1],mem[1,3],ymm4[7,5],mem[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm4[1,1],ymm6[2,0],ymm4[5,5],ymm6[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm6[2,3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,0],ymm0[1,0],ymm3[7,4],ymm0[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm12[3,0],ymm0[1,0],ymm12[7,4],ymm0[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm2[2,0],ymm0[4,7],ymm2[6,4]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm8[0,1],xmm11[2,3]
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm3 # 32-byte Folded Reload
-; AVX-NEXT: # ymm3 = ymm9[3,1],mem[1,3],ymm9[7,5],mem[5,7]
+; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm8[0,1],xmm13[2,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm10[3,1],ymm3[1,3],ymm10[7,5],ymm3[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[1,1],ymm3[2,0],ymm4[5,5],ymm3[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
@@ -4407,7 +4414,7 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps %ymm2, 64(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm2, (%r8)
-; AVX-NEXT: vmovaps %ymm14, 96(%r9)
+; AVX-NEXT: vmovaps %ymm11, 96(%r9)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm2, 32(%r9)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
@@ -4417,9 +4424,9 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX-NEXT: vmovaps %ymm0, 96(%rax)
; AVX-NEXT: vmovaps %ymm1, 32(%rax)
-; AVX-NEXT: vmovaps %ymm7, 64(%rax)
-; AVX-NEXT: vmovaps %ymm6, (%rax)
-; AVX-NEXT: addq $1032, %rsp # imm = 0x408
+; AVX-NEXT: vmovaps %ymm5, 64(%rax)
+; AVX-NEXT: vmovaps %ymm7, (%rax)
+; AVX-NEXT: addq $1048, %rsp # imm = 0x418
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
@@ -8025,54 +8032,53 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX-LABEL: load_i32_stride6_vf64:
; AVX: # %bb.0:
-; AVX-NEXT: subq $2584, %rsp # imm = 0xA18
+; AVX-NEXT: subq $2616, %rsp # imm = 0xA38
; AVX-NEXT: vmovaps 608(%rdi), %ymm6
; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 672(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 640(%rdi), %ymm3
-; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 576(%rdi), %ymm7
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 352(%rdi), %ymm4
; AVX-NEXT: vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 320(%rdi), %ymm5
-; AVX-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 224(%rdi), %ymm7
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 192(%rdi), %ymm8
-; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 288(%rdi), %ymm1
+; AVX-NEXT: vmovaps 288(%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 256(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 224(%rdi), %ymm0
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 192(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 256(%rdi), %ymm0
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 288(%rdi), %ymm0, %ymm9
-; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm9[0,0],ymm1[6,4],ymm9[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm9[2,2],ymm0[6,4],ymm9[6,6]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm13
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm13[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm13[0,3]
+; AVX-NEXT: vinsertf128 $1, 288(%rdi), %ymm3, %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm9
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm9[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm9[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,0],ymm2[6,4],ymm1[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm3[2,2],ymm1[6,4],ymm3[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm5[2,3],ymm4[0,1]
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[3],ymm5[2]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm11
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm11[0,3]
+; AVX-NEXT: vmovaps 672(%rdi), %ymm2
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 640(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vinsertf128 $1, 672(%rdi), %ymm3, %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm2[2,0],ymm1[0,0],ymm2[6,4],ymm1[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,2],ymm0[6,4],ymm1[6,6]
-; AVX-NEXT: vmovaps 576(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5],ymm1[6,7]
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,0],ymm2[6,4],ymm1[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm3[2,2],ymm1[6,4],ymm3[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovapd 736(%rdi), %ymm1
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 704(%rdi), %ymm2
@@ -8083,24 +8089,24 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1056(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1024(%rdi), %ymm0
+; AVX-NEXT: vmovaps 992(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 1056(%rdi), %ymm0, %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm2[0,0],ymm1[6,4],ymm2[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm2[2,2],ymm0[6,4],ymm2[6,6]
-; AVX-NEXT: vmovaps 992(%rdi), %ymm1
+; AVX-NEXT: vmovaps 960(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 960(%rdi), %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm8
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm8[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,3]
+; AVX-NEXT: vmovaps 1056(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX-NEXT: vmovaps 1024(%rdi), %ymm12
+; AVX-NEXT: vinsertf128 $1, 1056(%rdi), %ymm12, %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm13
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm13[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,0],ymm2[6,4],ymm1[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm12[2,2],ymm1[6,4],ymm12[6,6]
+; AVX-NEXT: vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovapd 1120(%rdi), %ymm1
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 1088(%rdi), %ymm2
@@ -8111,23 +8117,24 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1440(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1408(%rdi), %ymm0
+; AVX-NEXT: vmovaps 1376(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 1440(%rdi), %ymm0, %ymm11
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm11[0,0],ymm1[6,4],ymm11[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm11[2,2],ymm0[6,4],ymm11[6,6]
-; AVX-NEXT: vmovaps 1376(%rdi), %ymm1
+; AVX-NEXT: vmovaps 1344(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1344(%rdi), %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[0,3]
+; AVX-NEXT: vmovaps 1440(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX-NEXT: vmovaps 1408(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vinsertf128 $1, 1440(%rdi), %ymm3, %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm10
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm10[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm10[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,0],ymm2[6,4],ymm1[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm3[2,2],ymm1[6,4],ymm3[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovapd 1504(%rdi), %ymm1
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 1472(%rdi), %ymm2
@@ -8138,23 +8145,24 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 96(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 64(%rdi), %ymm0
+; AVX-NEXT: vmovaps 32(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 96(%rdi), %ymm0, %ymm8
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm8[0,0],ymm1[6,4],ymm8[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm8[2,2],ymm0[6,4],ymm8[6,6]
-; AVX-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX-NEXT: vmovaps (%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps (%rdi), %ymm2
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,3]
+; AVX-NEXT: vmovaps 96(%rdi), %ymm2
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX-NEXT: vmovaps 64(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vinsertf128 $1, 96(%rdi), %ymm3, %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,0],ymm2[6,4],ymm1[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm3[2,2],ymm1[6,4],ymm3[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovapd 160(%rdi), %ymm1
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 128(%rdi), %ymm2
@@ -8165,194 +8173,202 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 480(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 448(%rdi), %ymm0
+; AVX-NEXT: vmovaps 416(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 480(%rdi), %ymm0, %ymm6
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm6[0,0],ymm1[6,4],ymm6[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm6[2,2],ymm0[6,4],ymm6[6,6]
-; AVX-NEXT: vmovaps 416(%rdi), %ymm1
+; AVX-NEXT: vmovaps 384(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 384(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,3]
+; AVX-NEXT: vmovaps 480(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; AVX-NEXT: vmovaps 448(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vinsertf128 $1, 480(%rdi), %ymm3, %ymm5
+; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm5[0,0],ymm1[6,4],ymm5[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm3[2,2],ymm1[6,4],ymm3[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
; AVX-NEXT: vmovapd 544(%rdi), %ymm1
; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovapd 512(%rdi), %ymm2
-; AVX-NEXT: vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[0,1]
-; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[3],ymm2[2]
+; AVX-NEXT: vmovapd 512(%rdi), %ymm3
+; AVX-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[0,1]
+; AVX-NEXT: vmovupd %ymm1, (%rsp) # 32-byte Spill
+; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[3],ymm3[2]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 864(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 832(%rdi), %ymm0
+; AVX-NEXT: vmovaps 800(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 864(%rdi), %ymm0, %ymm3
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm1[2,0],ymm3[0,0],ymm1[6,4],ymm3[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm3[2,2],ymm0[6,4],ymm3[6,6]
-; AVX-NEXT: vmovaps 800(%rdi), %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 768(%rdi), %ymm2
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX-NEXT: vmovaps 768(%rdi), %ymm1
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovapd 928(%rdi), %ymm1
-; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm10, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm10[0,1],xmm1[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,3]
+; AVX-NEXT: vmovaps 864(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 832(%rdi), %ymm5
+; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vinsertf128 $1, 864(%rdi), %ymm5, %ymm7
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0],ymm7[0,0],ymm3[6,4],ymm7[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[2,0],ymm5[2,2],ymm3[6,4],ymm5[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX-NEXT: vmovapd 928(%rdi), %ymm3
+; AVX-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovapd 896(%rdi), %ymm5
; AVX-NEXT: vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm5[2,3],ymm1[0,1]
-; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[3],ymm5[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1248(%rdi), %ymm5
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1216(%rdi), %ymm0
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm5[2,3],ymm3[0,1]
+; AVX-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[3],ymm5[2]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vinsertf128 $1, 1248(%rdi), %ymm0, %ymm1
-; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm5[2,0],ymm1[0,0],ymm5[6,4],ymm1[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm0[2,0],ymm1[2,2],ymm0[6,4],ymm1[6,6]
; AVX-NEXT: vmovaps 1184(%rdi), %ymm0
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovaps 1152(%rdi), %ymm5
-; AVX-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5],ymm5[6,7]
-; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX-NEXT: vblendps {{.*#+}} xmm14 = xmm0[0,1],xmm5[2,3]
-; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[0,2],xmm5[0,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3,4,5,6,7]
-; AVX-NEXT: vmovapd 1312(%rdi), %ymm12
-; AVX-NEXT: vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovapd 1280(%rdi), %ymm0
-; AVX-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm12[0,1]
-; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufpd {{.*#+}} ymm15 = ymm1[0],ymm0[1],ymm1[3],ymm0[2]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
-; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm12[3,0],ymm0[1,0],ymm12[7,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm0[2,3],ymm14[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[1,0],xmm9[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,2],xmm9[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm0[3,1],ymm12[1,3],ymm0[7,5],ymm12[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm14[0,1,2,3,4,5],ymm15[6,7]
-; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm9[3,0],ymm0[1,0],ymm9[7,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm0[2,3],ymm14[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[1,0],xmm1[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[0,2],xmm1[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm0[3,1],mem[1,3],ymm0[7,5],mem[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
+; AVX-NEXT: vmovaps 1152(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7]
+; AVX-NEXT: vextractf128 $1, %ymm5, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm9 = xmm5[0,1],xmm0[2,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[0,2],xmm0[0,3]
+; AVX-NEXT: vmovaps 1248(%rdi), %ymm3
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovaps 1216(%rdi), %ymm7
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vinsertf128 $1, 1248(%rdi), %ymm7, %ymm14
; AVX-NEXT: vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[3,0],ymm1[1,0],ymm0[7,4],ymm1[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm1[2,3],ymm14[6,4],ymm1[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[1,0],xmm13[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm15[0,2],xmm13[1,3]
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm3[2,0],ymm14[0,0],ymm3[6,4],ymm14[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm15[2,0],ymm7[2,2],ymm15[6,4],ymm7[6,6]
+; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm9[0,1,2],ymm15[3,4,5,6,7]
+; AVX-NEXT: vmovapd 1312(%rdi), %ymm3
+; AVX-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovapd 1280(%rdi), %ymm7
+; AVX-NEXT: vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm7[2,3],ymm3[0,1]
+; AVX-NEXT: vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufpd {{.*#+}} ymm14 = ymm3[0],ymm7[1],ymm3[3],ymm7[2]
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm15[0,1,2,3,4,5],ymm14[6,7]
+; AVX-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm3[1,0],xmm13[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm14[0,2],xmm13[1,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm14 # 32-byte Folded Reload
+; AVX-NEXT: # ymm14 = ymm3[3,0],mem[1,0],ymm3[7,4],mem[5,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[2,0],ymm3[2,3],ymm14[6,4],ymm3[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm0[3,1],ymm15[1,3],ymm0[7,5],ymm15[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm14 # 32-byte Folded Reload
+; AVX-NEXT: # ymm14 = ymm7[3,1],mem[1,3],ymm7[7,5],mem[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm14 = ymm14[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
-; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1,2,3,4,5],ymm14[6,7]
+; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm7[1,0],xmm11[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm13[0,2],xmm11[1,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm14[3,0],ymm11[1,0],ymm14[7,4],ymm11[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm13[2,0],ymm11[2,3],ymm13[6,4],ymm11[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm0[1,0],xmm10[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm13[0,2],xmm10[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3,4,5,6,7]
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm13 # 32-byte Folded Reload
+; AVX-NEXT: # ymm13 = ymm14[3,0],mem[1,0],ymm14[7,4],mem[5,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[2,0],ymm7[2,3],ymm13[6,4],ymm7[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm13[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm0[3,1],ymm13[1,3],ymm0[7,5],ymm13[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[3,1],ymm9[1,3],ymm13[7,5],ymm9[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm13[6,7]
+; AVX-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[1,0],xmm8[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm11[0,2],xmm8[1,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm11 # 32-byte Folded Reload
+; AVX-NEXT: # ymm11 = ymm13[3,0],mem[1,0],ymm13[7,4],mem[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[2,0],ymm12[2,3],ymm11[6,4],ymm12[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm11[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[3,1],ymm12[1,3],ymm11[7,5],ymm12[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
-; AVX-NEXT: vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm11[6,7]
+; AVX-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[1,0],xmm6[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm8[0,2],xmm6[1,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm11[3,0],ymm8[1,0],ymm11[7,4],ymm8[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm10[2,0],ymm8[2,3],ymm10[6,4],ymm8[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm0[1,0],xmm7[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm10[0,2],xmm7[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
-; AVX-NEXT: # ymm8 = ymm0[3,1],mem[1,3],ymm0[7,5],mem[5,7]
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm8 # 32-byte Folded Reload
+; AVX-NEXT: # ymm8 = ymm11[3,0],mem[1,0],ymm11[7,4],mem[5,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[2,0],ymm15[2,3],ymm8[6,4],ymm15[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX-NEXT: # ymm8 = ymm8[3,1],mem[1,3],ymm8[7,5],mem[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
-; AVX-NEXT: vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm8[6,7]
+; AVX-NEXT: vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[1,0],xmm4[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm6[0,2],xmm4[1,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm8[3,0],ymm6[1,0],ymm8[7,4],ymm6[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm7[2,0],ymm6[2,3],ymm7[6,4],ymm6[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm0[1,0],xmm4[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm7[0,2],xmm4[1,3]
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT: # ymm6 = ymm8[3,0],mem[1,0],ymm8[7,4],mem[5,4]
+; AVX-NEXT: vshufps $226, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT: # ymm6 = ymm6[2,0],mem[2,3],ymm6[6,4],mem[6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm0[3,1],ymm7[1,3],ymm0[7,5],ymm7[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX-NEXT: # ymm6 = ymm6[3,1],mem[1,3],ymm6[7,5],mem[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
; AVX-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm6[3,0],ymm3[1,0],ymm6[7,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm4[2,0],ymm3[2,3],ymm4[6,4],ymm3[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm0[1,0],xmm2[3,0]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,0],xmm2[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm4[0,2],xmm2[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5,6,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm0[3,1],ymm4[1,3],ymm0[7,5],ymm4[5,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX-NEXT: # ymm4 = ymm4[3,0],mem[1,0],ymm4[7,4],mem[5,4]
+; AVX-NEXT: vshufps $226, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX-NEXT: # ymm4 = ymm4[2,0],mem[2,3],ymm4[6,4],mem[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[3,1],ymm6[1,3],ymm4[7,5],ymm6[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm3[3,0],ymm0[1,0],ymm3[7,4],ymm0[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm0[2,3],ymm2[6,4],ymm0[6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,0],xmm5[3,0]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm5[1,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm10[1,0],xmm1[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,2],xmm1[1,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[3,0],mem[1,0],ymm2[7,4],mem[5,4]
+; AVX-NEXT: vshufps $226, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm2[2,0],mem[2,3],ymm2[6,4],mem[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload
-; AVX-NEXT: # ymm1 = ymm2[3,1],mem[1,3],ymm2[7,5],mem[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[3,1],ymm4[1,3],ymm2[7,5],ymm4[5,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm5[1,0],xmm0[3,0]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[1,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm1[3,0],mem[1,0],ymm1[7,4],mem[5,4]
+; AVX-NEXT: vshufps $226, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm1[2,0],mem[2,3],ymm1[6,4],mem[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX-NEXT: # ymm1 = ymm1[3,1],mem[1,3],ymm1[7,5],mem[5,7]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -8361,34 +8377,33 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm0[2,1],mem[2,0],ymm0[6,5],mem[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],ymm3[2,0],ymm0[6,5],ymm3[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT: # ymm12 = ymm12[0,1,2,3],mem[4,5],ymm12[6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm12[2,3,0,1]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
+; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm12[2,0],ymm1[4,4],ymm12[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm2[2,0],ymm1[4,4],ymm2[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm9[2,1],mem[2,0],ymm9[6,5],mem[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm14[2,1],ymm7[2,0],ymm14[6,5],ymm7[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
-; AVX-NEXT: # ymm2 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm9[0,1,2,3],mem[4,5],ymm9[6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -8399,16 +8414,15 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm0[2,1],mem[2,0],ymm0[6,5],mem[6,4]
+; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm13[2,1],mem[2,0],ymm13[6,5],mem[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX-NEXT: # ymm2 = ymm15[0,1,2,3],mem[4,5],ymm15[6,7]
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm2 # 32-byte Folded Reload
+; AVX-NEXT: # ymm2 = ymm12[0,1,2,3],mem[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -8419,15 +8433,15 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm14[2,1],mem[2,0],ymm14[6,5],mem[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm11[2,1],ymm15[2,0],ymm11[6,5],ymm15[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm13[0,1,2,3],mem[4,5],ymm13[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
+; AVX-NEXT: # ymm10 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm10[2,3,0,1]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm10[2,0],ymm1[4,4],ymm10[6,4]
@@ -8436,9 +8450,9 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm11[2,1],mem[2,0],ymm11[6,5],mem[6,4]
+; AVX-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
+; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm8[2,1],mem[2,0],ymm8[6,5],mem[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8457,15 +8471,16 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX-NEXT: # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm8[2,1],mem[2,0],ymm8[6,5],mem[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm0[2,1],mem[2,0],ymm0[6,5],mem[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[2,0],xmm1[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm9 # 32-byte Folded Reload
-; AVX-NEXT: # ymm9 = ymm7[0,1,2,3],mem[4,5],ymm7[6,7]
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm9 # 32-byte Folded Reload
+; AVX-NEXT: # ymm9 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm9[2,3,0,1]
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0],ymm9[2,0],ymm1[4,4],ymm9[6,4]
@@ -8474,8 +8489,9 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
; AVX-NEXT: # ymm8 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
-; AVX-NEXT: # ymm0 = ymm6[2,1],mem[2,0],ymm6[6,5],mem[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX-NEXT: vshufps $38, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm0[2,1],mem[2,0],ymm0[6,5],mem[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm8, %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8490,22 +8506,23 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
; AVX-NEXT: # ymm5 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vmovaps %ymm3, %ymm4
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm4[2,1],ymm3[2,0],ymm4[6,5],ymm3[6,4]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3,0,1]
; AVX-NEXT: vextractf128 $1, %ymm5, %xmm7
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm5[2,0],xmm7[2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3,4,5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX-NEXT: vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm15[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm2[0,0],ymm15[2,0],ymm2[4,4],ymm15[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm11[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1],ymm12[3,1],ymm0[4,5],ymm12[7,5]
+; AVX-NEXT: vshufps $116, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX-NEXT: # ymm0 = ymm0[0,1],mem[3,1],ymm0[4,5],mem[7,5]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm11 # 16-byte Folded Reload
; AVX-NEXT: # xmm11 = xmm1[3,1],mem[3,3]
@@ -8535,9 +8552,9 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm11 # 16-byte Folded Reload
; AVX-NEXT: # xmm11 = xmm0[3,1],mem[3,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vshufps $103, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT: # ymm12 = ymm0[3,1],mem[2,1],ymm0[7,5],mem[6,5]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX-NEXT: vshufps $103, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
+; AVX-NEXT: # ymm12 = ymm12[3,1],mem[2,1],ymm12[7,5],mem[6,5]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm12[2,3,0,1]
; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm11[0,1,2,3,4],ymm1[5,6,7]
@@ -8587,7 +8604,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vshufps $116, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX-NEXT: # ymm0 = ymm0[0,1],mem[3,1],ymm0[4,5],mem[7,5]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vshufps $247, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX-NEXT: # xmm1 = xmm1[3,1],mem[3,3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
@@ -8612,9 +8629,9 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,0],ymm5[4,5],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm2[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm10[0,0],ymm2[6,4],ymm10[4,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm10[0,2],ymm1[2,0],ymm10[4,6],ymm1[6,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm2[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm11[0,0],ymm2[6,4],ymm11[4,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[0,2],ymm1[2,0],ymm11[4,6],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -8668,7 +8685,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX-NEXT: vmovapd 656(%rdi), %xmm1
-; AVX-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovupd %ymm1, (%rsp) # 32-byte Spill
; AVX-NEXT: vshufpd $9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
; AVX-NEXT: # ymm1 = ymm1[1],mem[0],ymm1[2],mem[3]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
@@ -8680,8 +8697,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT: # ymm11 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX-NEXT: # ymm10 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX-NEXT: vmovaps 800(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -8693,14 +8710,14 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm6[0],ymm1[2],ymm6[3]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,0],ymm3[4,5],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm11[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm11[2,0],ymm3[0,0],ymm11[6,4],ymm3[4,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm10[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm10[2,0],ymm3[0,0],ymm10[6,4],ymm3[4,4]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm3[0,2],ymm1[2,0],ymm3[4,6],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX-NEXT: # ymm14 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX-NEXT: vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX-NEXT: # ymm9 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX-NEXT: vmovaps 992(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
@@ -8714,8 +8731,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,0],ymm2[4,5],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm14[2,3,0,1]
-; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm14[2,0],ymm2[0,0],ymm14[6,4],ymm2[4,4]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm9[2,3,0,1]
+; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm9[2,0],ymm2[0,0],ymm9[6,4],ymm2[4,4]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[2,0],ymm2[4,6],ymm1[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -8745,114 +8762,114 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps 1376(%rdi), %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX-NEXT: vmovaps 1360(%rdi), %xmm9
-; AVX-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2,3]
-; AVX-NEXT: vmovapd 1424(%rdi), %xmm9
-; AVX-NEXT: vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vshufpd $9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm9[1],mem[0],ymm9[2],mem[3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm9[0,1],ymm15[2,0],ymm9[4,5],ymm15[6,4]
+; AVX-NEXT: vmovaps 1360(%rdi), %xmm13
+; AVX-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm13[0],xmm0[1],xmm13[2,3]
+; AVX-NEXT: vmovapd 1424(%rdi), %xmm13
+; AVX-NEXT: vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vshufpd $9, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm13[1],mem[0],ymm13[2],mem[3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm13[0,1],ymm15[2,0],ymm13[4,5],ymm15[6,4]
; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm0[0,1],ymm15[2,3,4,5,6,7]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm6[2,3,0,1]
; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm6[2,0],ymm0[0,0],ymm6[6,4],ymm0[4,4]
; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm0[0,2],ymm13[2,0],ymm0[4,6],ymm13[6,4]
-; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm15[0,1,2,3,4],ymm13[5,6,7]
-; AVX-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm9[3,0],ymm10[1,0],ymm9[7,4],ymm10[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm10[0,3],ymm13[2,0],ymm10[4,7],ymm13[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm13 # 16-byte Folded Reload
-; AVX-NEXT: # xmm13 = xmm10[0,1],mem[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm10[1,1],ymm15[2,0],ymm10[5,5],ymm15[6,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3,4],ymm13[5,6,7]
+; AVX-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[3,0],ymm11[1,0],ymm13[7,4],ymm11[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[0,3],ymm13[2,0],ymm11[4,7],ymm13[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
+; AVX-NEXT: # xmm13 = xmm13[0,1],mem[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm15[3,1],mem[1,3],ymm15[7,5],mem[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm14[1,1],ymm15[2,0],ymm14[5,5],ymm15[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[1,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm13[0,1,2,3,4],ymm9[5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm10[3,0],ymm7[1,0],ymm10[7,4],ymm7[5,4]
+; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm13[0,1,2,3,4],ymm11[5,6,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[3,0],ymm7[1,0],ymm13[7,4],ymm7[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,3],ymm13[2,0],ymm7[4,7],ymm13[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm13 # 16-byte Folded Reload
-; AVX-NEXT: # xmm13 = xmm10[0,1],mem[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm10[1,1],ymm15[2,0],ymm10[5,5],ymm15[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
+; AVX-NEXT: # xmm13 = xmm13[0,1],mem[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm14[3,1],mem[1,3],ymm14[7,5],mem[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm14[1,1],ymm15[2,0],ymm14[5,5],ymm15[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[1,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1,2,3,4],ymm7[5,6,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm10[3,0],ymm5[1,0],ymm10[7,4],ymm5[5,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm13[3,0],ymm5[1,0],ymm13[7,4],ymm5[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,3],ymm13[2,0],ymm5[4,7],ymm13[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm13 # 16-byte Folded Reload
-; AVX-NEXT: # xmm13 = xmm10[0,1],mem[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm15 # 32-byte Folded Reload
-; AVX-NEXT: # ymm15 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm10[1,1],ymm15[2,0],ymm10[5,5],ymm15[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
+; AVX-NEXT: # xmm13 = xmm13[0,1],mem[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm15 # 32-byte Folded Reload
+; AVX-NEXT: # ymm15 = ymm14[3,1],mem[1,3],ymm14[7,5],mem[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm15 = ymm14[1,1],ymm15[2,0],ymm14[5,5],ymm15[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[1,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm13[0,1,2,3,4],ymm5[5,6,7]
; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm12[3,0],ymm4[1,0],ymm12[7,4],ymm4[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,3],ymm12[2,0],ymm4[4,7],ymm12[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm12 # 16-byte Folded Reload
-; AVX-NEXT: # xmm12 = xmm10[0,1],mem[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm13 # 32-byte Folded Reload
-; AVX-NEXT: # ymm13 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm10[1,1],ymm13[2,0],ymm10[5,5],ymm13[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm12 # 16-byte Folded Reload
+; AVX-NEXT: # xmm12 = xmm12[0,1],mem[2,3]
+; AVX-NEXT: vmovups (%rsp), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
+; AVX-NEXT: # ymm13 = ymm13[3,1],mem[1,3],ymm13[7,5],mem[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm13 = ymm15[1,1],ymm13[2,0],ymm15[5,5],ymm13[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm12 = xmm12[1,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm12[0,1,2,3,4],ymm4[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm11[3,0],ymm3[1,0],ymm11[7,4],ymm3[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm11[2,0],ymm3[4,7],ymm11[6,4]
-; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm11 # 16-byte Folded Reload
-; AVX-NEXT: # xmm11 = xmm10[0,1],mem[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm12 # 32-byte Folded Reload
-; AVX-NEXT: # ymm12 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm10[1,1],ymm12[2,0],ymm10[5,5],ymm12[6,4]
-; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[1,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3,4],ymm3[5,6,7]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm14[3,0],ymm2[1,0],ymm14[7,4],ymm2[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,3],ymm10[2,0],ymm2[4,7],ymm10[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[3,0],ymm3[1,0],ymm10[7,4],ymm3[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm10[2,0],ymm3[4,7],ymm10[6,4]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
; AVX-NEXT: # xmm10 = xmm10[0,1],mem[2,3]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX-NEXT: # ymm11 = ymm11[3,1],mem[1,3],ymm11[7,5],mem[5,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm11 = ymm12[1,1],ymm11[2,0],ymm12[5,5],ymm11[6,4]
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
+; AVX-NEXT: # ymm12 = ymm12[3,1],mem[1,3],ymm12[7,5],mem[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm12 = ymm13[1,1],ymm12[2,0],ymm13[5,5],ymm12[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm10[1,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3,4],ymm2[5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm10[0,1,2,3,4],ymm3[5,6,7]
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm9[3,0],ymm2[1,0],ymm9[7,4],ymm2[5,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,3],ymm9[2,0],ymm2[4,7],ymm9[6,4]
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
+; AVX-NEXT: # xmm9 = xmm9[0,1],mem[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX-NEXT: # ymm10 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm12[1,1],ymm10[2,0],ymm12[5,5],ymm10[6,4]
+; AVX-NEXT: vshufps {{.*#+}} xmm9 = xmm9[1,3,2,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4],ymm2[5,6,7]
; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm8[3,0],ymm1[1,0],ymm8[7,4],ymm1[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,3],ymm8[2,0],ymm1[4,7],ymm8[6,4]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX-NEXT: vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
; AVX-NEXT: # xmm8 = xmm8[0,1],mem[2,3]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX-NEXT: # ymm9 = ymm9[3,1],mem[1,3],ymm9[7,5],mem[5,7]
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX-NEXT: # ymm10 = ymm10[3,1],mem[1,3],ymm10[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm11[1,1],ymm10[2,0],ymm11[5,5],ymm10[6,4]
+; AVX-NEXT: vshufps {{.*#+}} ymm9 = ymm10[1,1],ymm9[2,0],ymm10[5,5],ymm9[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm8 = xmm8[1,3,2,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm10[2,3,4,5,6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3,4],ymm1[5,6,7]
; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm6[3,0],ymm0[1,0],ymm6[7,4],ymm0[5,4]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],ymm6[2,0],ymm0[4,7],ymm6[6,4]
@@ -8862,8 +8879,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX-NEXT: vshufps $215, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
; AVX-NEXT: # ymm8 = ymm8[3,1],mem[1,3],ymm8[7,5],mem[5,7]
-; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm10[1,1],ymm8[2,0],ymm10[5,5],ymm8[6,4]
+; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX-NEXT: vshufps {{.*#+}} ymm8 = ymm9[1,1],ymm8[2,0],ymm9[5,5],ymm8[6,4]
; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[1,3,2,3]
; AVX-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3,4,5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm0[5,6,7]
@@ -8955,8 +8972,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX-NEXT: vmovaps %ymm4, 96(%rax)
; AVX-NEXT: vmovaps %ymm5, 64(%rax)
; AVX-NEXT: vmovaps %ymm7, 32(%rax)
-; AVX-NEXT: vmovaps %ymm9, (%rax)
-; AVX-NEXT: addq $2584, %rsp # imm = 0xA18
+; AVX-NEXT: vmovaps %ymm11, (%rax)
+; AVX-NEXT: addq $2616, %rsp # imm = 0xA38
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
index 07d8a370a5f93..e88a651d29cef 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
@@ -346,26 +346,26 @@ define void @store_i32_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX-NEXT: vunpcklps {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
; AVX-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
-; AVX-NEXT: vunpcklps {{.*#+}} ymm7 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
-; AVX-NEXT: vshufpd {{.*#+}} ymm7 = ymm7[0,0,3,3]
-; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5,6],ymm5[7]
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm7 = mem[0,1,0,1]
-; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm0
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3],ymm0[4],ymm5[5,6,7]
+; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
+; AVX-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6],ymm5[7]
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm5 = mem[0,1,0,1]
+; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm7
+; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm7[0],ymm4[1,2,3],ymm7[4],ymm4[5,6,7]
; AVX-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[u,u,u,2,u,u,u,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4,5,6,7]
-; AVX-NEXT: vpermilps {{.*#+}} ymm4 = ymm6[1,u,u,u,6,u,u,u]
-; AVX-NEXT: vbroadcastss 8(%rcx), %ymm5
-; AVX-NEXT: vunpcklps {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5],ymm1[6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3,4,5],ymm7[6],ymm1[7]
-; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,3],xmm3[3,3]
-; AVX-NEXT: vbroadcastss 12(%rsi), %xmm3
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
-; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm7[3]
-; AVX-NEXT: vmovaps %xmm2, 64(%r9)
-; AVX-NEXT: vmovaps %ymm0, (%r9)
-; AVX-NEXT: vmovaps %ymm1, 32(%r9)
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6,7]
+; AVX-NEXT: vpermilps {{.*#+}} ymm1 = ymm6[1,u,u,u,6,u,u,u]
+; AVX-NEXT: vbroadcastss 8(%rcx), %ymm6
+; AVX-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[1],ymm6[1],ymm1[4],ymm6[4],ymm1[5],ymm6[5]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4,5],ymm5[6],ymm0[7]
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,3],xmm3[3,3]
+; AVX-NEXT: vbroadcastss 12(%rsi), %xmm2
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[3]
+; AVX-NEXT: vmovaps %xmm1, 64(%r9)
+; AVX-NEXT: vmovaps %ymm4, (%r9)
+; AVX-NEXT: vmovaps %ymm0, 32(%r9)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
index 34100adacbeb9..62e2aadd818c1 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
@@ -473,7 +473,7 @@ define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm8
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm9
; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm9[1,0],ymm8[1,0],ymm9[5,4],ymm8[5,4]
-; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0],ymm8[2,1],ymm10[6,4],ymm8[6,5]
+; AVX-NEXT: vshufps {{.*#+}} ymm10 = ymm10[2,0],ymm1[2,1],ymm10[6,4],ymm1[6,5]
; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
; AVX-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm7[1],ymm5[1],ymm7[3],ymm5[3]
; AVX-NEXT: vshufps {{.*#+}} ymm6 = ymm5[1,1],ymm6[2,0],ymm5[5,5],ymm6[6,4]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
index bb609b9938a82..05c111ae5049f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
@@ -62,16 +62,16 @@ define void @store_i64_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovaps (%rcx), %xmm2
; AVX2-NEXT: vmovaps (%r8), %xmm3
; AVX2-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
-; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
-; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
+; AVX2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
-; AVX2-NEXT: vmovaps %xmm1, 64(%r9)
+; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX2-NEXT: vmovaps %xmm2, 64(%r9)
; AVX2-NEXT: vmovaps %ymm0, (%r9)
-; AVX2-NEXT: vmovaps %ymm4, 32(%r9)
+; AVX2-NEXT: vmovaps %ymm1, 32(%r9)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -82,16 +82,16 @@ define void @store_i64_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovaps (%rcx), %xmm2
; AVX2-FP-NEXT: vmovaps (%r8), %xmm3
; AVX2-FP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
-; AVX2-FP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-FP-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
-; AVX2-FP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-FP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
+; AVX2-FP-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-FP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FP-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
-; AVX2-FP-NEXT: vmovaps %xmm1, 64(%r9)
+; AVX2-FP-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX2-FP-NEXT: vmovaps %xmm2, 64(%r9)
; AVX2-FP-NEXT: vmovaps %ymm0, (%r9)
-; AVX2-FP-NEXT: vmovaps %ymm4, 32(%r9)
+; AVX2-FP-NEXT: vmovaps %ymm1, 32(%r9)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
@@ -102,16 +102,16 @@ define void @store_i64_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovaps (%rcx), %xmm2
; AVX2-FCP-NEXT: vmovaps (%r8), %xmm3
; AVX2-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
-; AVX2-FCP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
-; AVX2-FCP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-FCP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
+; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-FCP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
-; AVX2-FCP-NEXT: vmovaps %xmm1, 64(%r9)
+; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX2-FCP-NEXT: vmovaps %xmm2, 64(%r9)
; AVX2-FCP-NEXT: vmovaps %ymm0, (%r9)
-; AVX2-FCP-NEXT: vmovaps %ymm4, 32(%r9)
+; AVX2-FCP-NEXT: vmovaps %ymm1, 32(%r9)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
index 48e1d0a7fb930..d1fd4a360036b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
@@ -79,24 +79,24 @@ define void @store_i64_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-NEXT: vmovaps (%r8), %xmm2
; AVX2-NEXT: vmovaps (%r9), %xmm3
; AVX2-NEXT: vmovaps (%r10), %xmm4
-; AVX2-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm5
; AVX2-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,1]
+; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm6
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3],ymm6[4,5,6,7]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
; AVX2-NEXT: vbroadcastsd %xmm4, %ymm6
-; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
-; AVX2-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
+; AVX2-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm1[1],ymm5[3],ymm1[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
-; AVX2-NEXT: vmovaps %xmm1, 96(%rax)
-; AVX2-NEXT: vmovaps %ymm0, (%rax)
+; AVX2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
+; AVX2-NEXT: vmovaps %xmm3, 96(%rax)
+; AVX2-NEXT: vmovaps %ymm1, (%rax)
; AVX2-NEXT: vmovaps %ymm2, 64(%rax)
-; AVX2-NEXT: vmovaps %ymm5, 32(%rax)
+; AVX2-NEXT: vmovaps %ymm0, 32(%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -109,24 +109,24 @@ define void @store_i64_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FP-NEXT: vmovaps (%r8), %xmm2
; AVX2-FP-NEXT: vmovaps (%r9), %xmm3
; AVX2-FP-NEXT: vmovaps (%r10), %xmm4
-; AVX2-FP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-FP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm5
; AVX2-FP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-FP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
-; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,1]
+; AVX2-FP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm6
+; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3],ymm6[4,5,6,7]
+; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
; AVX2-FP-NEXT: vbroadcastsd %xmm4, %ymm6
-; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
-; AVX2-FP-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
+; AVX2-FP-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm1[1],ymm5[3],ymm1[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FP-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
-; AVX2-FP-NEXT: vmovaps %xmm1, 96(%rax)
-; AVX2-FP-NEXT: vmovaps %ymm0, (%rax)
+; AVX2-FP-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-FP-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
+; AVX2-FP-NEXT: vmovaps %xmm3, 96(%rax)
+; AVX2-FP-NEXT: vmovaps %ymm1, (%rax)
; AVX2-FP-NEXT: vmovaps %ymm2, 64(%rax)
-; AVX2-FP-NEXT: vmovaps %ymm5, 32(%rax)
+; AVX2-FP-NEXT: vmovaps %ymm0, 32(%rax)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
@@ -139,24 +139,24 @@ define void @store_i64_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX2-FCP-NEXT: vmovaps (%r8), %xmm2
; AVX2-FCP-NEXT: vmovaps (%r9), %xmm3
; AVX2-FCP-NEXT: vmovaps (%r10), %xmm4
-; AVX2-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX2-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm5
; AVX2-FCP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
-; AVX2-FCP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3],ymm5[4,5,6,7]
-; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,1]
+; AVX2-FCP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm6
+; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3],ymm6[4,5,6,7]
+; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
; AVX2-FCP-NEXT: vbroadcastsd %xmm4, %ymm6
-; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
-; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5],ymm0[6,7]
+; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm1[1],ymm5[3],ymm1[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-FCP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
-; AVX2-FCP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
-; AVX2-FCP-NEXT: vmovaps %xmm1, 96(%rax)
-; AVX2-FCP-NEXT: vmovaps %ymm0, (%rax)
+; AVX2-FCP-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[2],ymm1[2]
+; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-FCP-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
+; AVX2-FCP-NEXT: vmovaps %xmm3, 96(%rax)
+; AVX2-FCP-NEXT: vmovaps %ymm1, (%rax)
; AVX2-FCP-NEXT: vmovaps %ymm2, 64(%rax)
-; AVX2-FCP-NEXT: vmovaps %ymm5, 32(%rax)
+; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%rax)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-pack-512.ll b/llvm/test/CodeGen/X86/vector-pack-512.ll
index a3430358e6562..dc60bfdca53b2 100644
--- a/llvm/test/CodeGen/X86/vector-pack-512.ll
+++ b/llvm/test/CodeGen/X86/vector-pack-512.ll
@@ -47,24 +47,24 @@ define <32 x i16> @trunc_concat_packusdw_512(<16 x i32> %a0, <16 x i32> %a1) nou
define <64 x i8> @trunc_concat_packsswb_512(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; AVX512F-LABEL: trunc_concat_packsswb_512:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpsraw $15, %ymm0, %ymm2
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT: vpsraw $15, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2
; AVX512F-NEXT: vpsraw $15, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512F-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,1,8,9,2,3,10,11]
-; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT: vpmovsxbq {{.*#+}} zmm3 = [4,5,12,13,6,7,14,15]
+; AVX512F-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,1,8,9,2,3,10,11]
; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512F-NEXT: vextracti64x4 $1, %zmm3, %ymm1
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpmovsxbq {{.*#+}} zmm0 = [4,5,12,13,6,7,14,15]
+; AVX512F-NEXT: vpermi2q %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm3, %ymm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
@@ -93,24 +93,24 @@ define <64 x i8> @trunc_concat_packsswb_512(<32 x i16> %a0, <32 x i16> %a1) noun
define <64 x i8> @trunc_concat_packuswb_512(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; AVX512F-LABEL: trunc_concat_packuswb_512:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpsrlw $15, %ymm0, %ymm2
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT: vpsrlw $15, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm2
; AVX512F-NEXT: vpsrlw $15, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512F-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,1,8,9,2,3,10,11]
-; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT: vpmovsxbq {{.*#+}} zmm3 = [4,5,12,13,6,7,14,15]
+; AVX512F-NEXT: vpmovsxbq {{.*#+}} zmm3 = [0,1,8,9,2,3,10,11]
; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512F-NEXT: vextracti64x4 $1, %zmm3, %ymm1
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpmovsxbq {{.*#+}} zmm0 = [4,5,12,13,6,7,14,15]
+; AVX512F-NEXT: vpermi2q %zmm1, %zmm2, %zmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm3, %ymm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 42521b809b102..b5487ad192337 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -3746,10 +3746,11 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1
; AVX2-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7],ymm2[8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13,14],ymm0[15]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
@@ -4180,10 +4181,11 @@ define void @vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4(ptr %in.
; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1
; AVX2-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5],xmm0[6],xmm1[7]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm1
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7,8,9,10,11],ymm0[12],ymm2[13,14,15]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7]
; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index 7f206fd3bf2aa..95fde98536818 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -3004,76 +3004,81 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
;
; AVX2-SLOW-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vpbroadcastw (%rdi), %xmm0
+; AVX2-SLOW-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX2-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
+; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14],ymm0[15]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],mem[1,2],xmm0[3],mem[4,5],xmm0[6],mem[7]
-; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX2-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
-; AVX2-SLOW-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rdx)
-; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx)
+; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rdx)
+; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rdx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8:
; AVX2-FAST-PERLANE: # %bb.0:
-; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT: vpbroadcastw (%rdi), %xmm1
-; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],mem[1,2],xmm1[3],mem[4,5],xmm1[6],mem[7]
-; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-FAST-PERLANE-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rdx)
-; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rdx)
+; AVX2-FAST-PERLANE-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2],xmm0[3],mem[4,5],xmm0[6],mem[7]
+; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1
+; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0,1],zero,zero,zero,zero,xmm1[0,1],zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 32(%rdx)
+; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; AVX2-FAST-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8:
; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero
-; AVX2-FAST-NEXT: vpbroadcastw (%rdi), %xmm1
-; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],mem[1,2],xmm1[3],mem[4,5],xmm1[6],mem[7]
-; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm1
-; AVX2-FAST-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rdx)
-; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rdx)
+; AVX2-FAST-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2],xmm0[3],mem[4,5],xmm0[6],mem[7]
+; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm1
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0,1],zero,zero,zero,zero,xmm1[0,1],zero,zero,zero,zero
+; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
+; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%rdx)
+; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
+; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14],ymm0[15]
; AVX512F-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],mem[1,2],xmm0[3],mem[4,5],xmm0[6],mem[7]
-; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqa %ymm1, 32(%rdx)
-; AVX512F-NEXT: vmovdqa %ymm0, (%rdx)
+; AVX512F-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX512F-NEXT: vpaddb (%rsi), %ymm1, %ymm1
+; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa %ymm0, 32(%rdx)
+; AVX512F-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastw (%rdi), %xmm0
+; AVX512DQ-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14],ymm0[15]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],mem[1,2],xmm0[3],mem[4,5],xmm0[6],mem[7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rdx)
-; AVX512DQ-NEXT: vmovdqa %ymm0, (%rdx)
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX512DQ-NEXT: vpaddb (%rsi), %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rdx)
+; AVX512DQ-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3333,76 +3338,81 @@ define void @vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4(ptr %in.
;
; AVX2-SLOW-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vpbroadcastw (%rdi), %xmm0
+; AVX2-SLOW-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX2-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],mem[1,2,3,4,5],xmm0[6],mem[7]
-; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX2-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
-; AVX2-SLOW-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rdx)
-; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx)
+; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rdx)
+; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rdx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4:
; AVX2-FAST-PERLANE: # %bb.0:
-; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT: vpbroadcastw (%rdi), %xmm1
-; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],mem[1,2,3,4,5],xmm1[6],mem[7]
-; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-FAST-PERLANE-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rdx)
-; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rdx)
+; AVX2-FAST-PERLANE-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3,4,5],xmm0[6],mem[7]
+; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1
+; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 32(%rdx)
+; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; AVX2-FAST-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4:
; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
-; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-NEXT: vpbroadcastw (%rdi), %xmm1
-; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],mem[1,2,3,4,5],xmm1[6],mem[7]
-; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm1, %ymm1
-; AVX2-FAST-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rdx)
-; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rdx)
+; AVX2-FAST-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3,4,5],xmm0[6],mem[7]
+; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm1
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
+; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%rdx)
+; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4,5,6,7]
+; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
; AVX512F-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],mem[1,2,3,4,5],xmm0[6],mem[7]
-; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqa %ymm1, 32(%rdx)
-; AVX512F-NEXT: vmovdqa %ymm0, (%rdx)
+; AVX512F-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7]
+; AVX512F-NEXT: vpaddb (%rsi), %ymm1, %ymm1
+; AVX512F-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa %ymm0, 32(%rdx)
+; AVX512F-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastw (%rdi), %xmm0
+; AVX512DQ-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4,5,6,7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],mem[1,2,3,4,5],xmm0[6],mem[7]
-; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT: vmovdqa %ymm1, 32(%rdx)
-; AVX512DQ-NEXT: vmovdqa %ymm0, (%rdx)
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7]
+; AVX512DQ-NEXT: vpaddb (%rsi), %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb 32(%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rdx)
+; AVX512DQ-NEXT: vmovdqa %ymm1, (%rdx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3594,11 +3604,12 @@ define void @vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2(ptr %
;
; AVX2-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa 48(%rdi), %xmm0
-; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = mem[0],xmm0[1,2,3,4,5,6,7]
-; AVX2-NEXT: vpbroadcastw (%rdi), %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = mem[0],xmm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vmovaps 32(%rsi), %ymm1
; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX2-NEXT: vmovaps %ymm1, 32(%rdx)
@@ -3608,11 +3619,12 @@ define void @vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2(ptr %
;
; AVX512F-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm0
-; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = mem[0],xmm0[1,2,3,4,5,6,7]
-; AVX512F-NEXT: vpbroadcastw (%rdi), %xmm1
-; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = mem[0],xmm1[1,2,3,4,5,6,7]
+; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512F-NEXT: vmovaps 32(%rsi), %ymm1
; AVX512F-NEXT: vmovaps %ymm1, 32(%rdx)
@@ -3622,11 +3634,12 @@ define void @vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2(ptr %
;
; AVX512DQ-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm0
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = mem[0],xmm0[1,2,3,4,5,6,7]
-; AVX512DQ-NEXT: vpbroadcastw (%rdi), %xmm1
-; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX512DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = mem[0],xmm1[1,2,3,4,5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
; AVX512DQ-NEXT: vmovaps 32(%rsi), %ymm1
; AVX512DQ-NEXT: vmovaps %ymm1, 32(%rdx)