[llvm] 8ae0e1f - [X86] Create X86ISD::SHUF128 512-bit masks with getV4X86ShuffleImm8ForMask

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 9 03:13:22 PDT 2023


Author: Simon Pilgrim
Date: 2023-08-09T11:13:03+01:00
New Revision: 8ae0e1f58d151daaa9fed5ce70497984a368ad3c

URL: https://github.com/llvm/llvm-project/commit/8ae0e1f58d151daaa9fed5ce70497984a368ad3c
DIFF: https://github.com/llvm/llvm-project/commit/8ae0e1f58d151daaa9fed5ce70497984a368ad3c.diff

LOG: [X86] Create X86ISD::SHUF128 512-bit masks with getV4X86ShuffleImm8ForMask

This allows us to use the same canonicalizations as PSHUFD/SHUFPS etc. to avoid unnecessarily demanding elements (better splat detection, blend pass-through, etc.), instead of defaulting undef mask values to zero.
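To illustrate the effect, below is a small standalone sketch (illustrative only, not LLVM's actual getV4X86ShuffleImm8ForMask implementation) of the kind of canonicalization the helper applies when encoding a 4-element shuffle mask into an 8-bit immediate, as inferred from the log message and the test diffs: undef lanes are folded into a splat where possible, or treated as identity, rather than being packed as zero. The function and constant names here are hypothetical.

    // Sketch of encoding a 4-lane shuffle mask (with -1 = undef) into an
    // imm8, with splat detection and identity defaults for undef lanes.
    #include <array>
    #include <cassert>
    #include <cstdio>

    static unsigned encodeV4ShuffleImm8(const std::array<int, 4> &Mask) {
      // If every defined lane selects the same source lane, emit a splat
      // immediate so later combines can recognise a broadcast.
      int FirstElt = -1;
      bool IsSplat = true;
      for (int M : Mask) {
        assert(M >= -1 && M < 4 && "mask element out of range");
        if (M < 0)
          continue;
        if (FirstElt < 0)
          FirstElt = M;
        else if (M != FirstElt)
          IsSplat = false;
      }
      if (FirstElt >= 0 && IsSplat)
        return (FirstElt << 6) | (FirstElt << 4) | (FirstElt << 2) | FirstElt;

      // Otherwise treat undef lanes as identity (lane i selects lane i)
      // instead of forcing them to lane 0.
      unsigned Imm = 0;
      for (int i = 0; i != 4; ++i)
        Imm |= unsigned(Mask[i] < 0 ? i : Mask[i]) << (i * 2);
      return Imm;
    }

    int main() {
      // Packing the undef lanes of {-1,2,2,-1} as zero would give 0x28;
      // recognising the splat of lane 2 gives 0xAA, which is why the
      // vshufi64x2 masks in the tests below change from patterns such as
      // [0,1,4,5] to [4,5,4,5].
      std::printf("0x%02X\n", encodeV4ShuffleImm8({-1, 2, 2, -1}));
      return 0;
    }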

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
    llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
    llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
    llvm/test/CodeGen/X86/vector-shuffle-v1.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c0a87ec3a00dbc..2ec5b2930a0d63 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -16414,8 +16414,8 @@ static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
 
   // Try to lower to vshuf64x2/vshuf32x4.
   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
-  unsigned PermMask = 0;
-  // Insure elements came from the same Op.
+  int PermMask[4] = {-1, -1, -1, -1};
+  // Ensure elements came from the same Op.
   for (int i = 0; i < 4; ++i) {
     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
     if (Widened128Mask[i] < 0)
@@ -16428,13 +16428,11 @@ static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
     else if (Ops[OpIndex] != Op)
       return SDValue();
 
-    // Convert the 128-bit shuffle mask selection values into 128-bit selection
-    // bits defined by a vshuf64x2 instruction's immediate control byte.
-    PermMask |= (Widened128Mask[i] % 4) << (i * 2);
+    PermMask[i] = Widened128Mask[i] % 4;
   }
 
   return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
-                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
+                     getV4X86ShuffleImm8ForMask(PermMask, DL, DAG));
 }
 
 /// Handle lowering of 8-lane 64-bit floating point shuffles.
@@ -37689,8 +37687,8 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
     auto MatchSHUF128 = [&](MVT ShuffleVT, const SDLoc &DL,
                             ArrayRef<int> ScaledMask, SDValue V1, SDValue V2,
                             SelectionDAG &DAG) {
-      unsigned PermMask = 0;
-      // Insure elements came from the same Op.
+      int PermMask[4] = {-1, -1, -1, -1};
+      // Ensure elements came from the same Op.
       SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
       for (int i = 0; i < 4; ++i) {
         assert(ScaledMask[i] >= -1 && "Illegal shuffle sentinel value");
@@ -37704,16 +37702,13 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
         else if (Ops[OpIndex] != Op)
           return SDValue();
 
-        // Convert the 128-bit shuffle mask selection values into 128-bit
-        // selection bits defined by a vshuf64x2 instruction's immediate control
-        // byte.
-        PermMask |= (ScaledMask[i] % 4) << (i * 2);
+        PermMask[i] = ScaledMask[i] % 4;
       }
 
       return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
                          CanonicalizeShuffleInput(ShuffleVT, Ops[0]),
                          CanonicalizeShuffleInput(ShuffleVT, Ops[1]),
-                         DAG.getTargetConstant(PermMask, DL, MVT::i8));
+                         getV4X86ShuffleImm8ForMask(PermMask, DL, DAG));
     };
 
     // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
index c74575023b4629..70b718cd7f282f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
@@ -870,7 +870,7 @@ define void @load_i64_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpermi2q %zmm2, %zmm5, %zmm3
 ; AVX512F-NEXT:    movb $24, %al
 ; AVX512F-NEXT:    kmovw %eax, %k1
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm7[0,1,4,5],zmm6[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm7[4,5,4,5],zmm6[4,5,4,5]
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [10,3,10,3,10,3,10,3]
 ; AVX512F-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vpermi2q %zmm1, %zmm4, %zmm8
@@ -983,7 +983,7 @@ define void @load_i64_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm5, %zmm3
 ; AVX512BW-NEXT:    movb $24, %r11b
 ; AVX512BW-NEXT:    kmovd %r11d, %k1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm7[0,1,4,5],zmm6[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm7[4,5,4,5],zmm6[4,5,4,5]
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [10,3,10,3,10,3,10,3]
 ; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm4, %zmm8
@@ -1851,7 +1851,7 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermt2q %zmm26, %zmm7, %zmm29
 ; AVX512F-NEXT:    movb $24, %r11b
 ; AVX512F-NEXT:    kmovw %r11d, %k2
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k2} = zmm10[0,1,4,5],zmm27[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k2} = zmm10[4,5,4,5],zmm27[4,5,4,5]
 ; AVX512F-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [10,3,10,3,10,3,10,3]
 ; AVX512F-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, %zmm17
@@ -1863,7 +1863,7 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    kmovw %r11d, %k1
 ; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm29 {%k1}
 ; AVX512F-NEXT:    vpermi2q %zmm24, %zmm6, %zmm7
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm2[0,1,4,5],zmm25[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm2[4,5,4,5],zmm25[4,5,4,5]
 ; AVX512F-NEXT:    vpermi2q %zmm30, %zmm0, %zmm16
 ; AVX512F-NEXT:    vpermt2q %zmm28, %zmm18, %zmm16
 ; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k1}
@@ -2037,7 +2037,7 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm7, %zmm29
 ; AVX512BW-NEXT:    movb $24, %r11b
 ; AVX512BW-NEXT:    kmovd %r11d, %k2
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k2} = zmm10[0,1,4,5],zmm28[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k2} = zmm10[4,5,4,5],zmm28[4,5,4,5]
 ; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm16 = [10,3,10,3,10,3,10,3]
 ; AVX512BW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm17
@@ -2049,7 +2049,7 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    kmovd %r11d, %k1
 ; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm29 {%k1}
 ; AVX512BW-NEXT:    vpermi2q %zmm24, %zmm6, %zmm7
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm2[0,1,4,5],zmm26[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm2[4,5,4,5],zmm26[4,5,4,5]
 ; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm0, %zmm16
 ; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm18, %zmm16
 ; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k1}
@@ -3974,7 +3974,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[0,1,4,5],zmm12[4,5,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[4,5,4,5],zmm12[4,5,4,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm28
@@ -3992,7 +3992,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[0,1,4,5],zmm29[4,5,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm29[4,5,4,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm27
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm27
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm20
@@ -4007,7 +4007,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1344(%rdi), %zmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm15
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[0,1,4,5],zmm13[4,5,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[4,5,4,5],zmm13[4,5,4,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm9
@@ -4017,7 +4017,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm13, %zmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm24, %zmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm6[0,1,4,5],zmm9[4,5,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm6[4,5,4,5],zmm9[4,5,4,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm24, %zmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 912(%rdi), %xmm13
@@ -4414,7 +4414,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[0,1,4,5],zmm12[4,5,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[4,5,4,5],zmm12[4,5,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm28
@@ -4432,7 +4432,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm16
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[0,1,4,5],zmm29[4,5,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm29[4,5,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm27
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm0, %zmm27
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm20
@@ -4447,7 +4447,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1344(%rdi), %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm15
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[0,1,4,5],zmm13[4,5,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[4,5,4,5],zmm13[4,5,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm9
@@ -4457,7 +4457,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm8, %zmm13, %zmm18
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm24, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm6[0,1,4,5],zmm9[4,5,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm6[4,5,4,5],zmm9[4,5,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm24, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 912(%rdi), %xmm13
@@ -4854,7 +4854,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm17
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[0,1,4,5],zmm12[4,5,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[4,5,4,5],zmm12[4,5,4,5]
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm28
@@ -4872,7 +4872,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm16
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm16
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[0,1,4,5],zmm29[4,5,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm29[4,5,4,5]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm27
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm27
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm20
@@ -4887,7 +4887,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 1344(%rdi), %zmm5
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm5, %zmm15
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[0,1,4,5],zmm13[4,5,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[4,5,4,5],zmm13[4,5,4,5]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm9
@@ -4897,7 +4897,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm13, %zmm18
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm24, %zmm8
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm6[0,1,4,5],zmm9[4,5,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm6[4,5,4,5],zmm9[4,5,4,5]
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm24, %zmm6
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 912(%rdi), %xmm13
@@ -5294,7 +5294,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm17
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[0,1,4,5],zmm12[4,5,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[4,5,4,5],zmm12[4,5,4,5]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm28
@@ -5312,7 +5312,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm16
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm16
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[0,1,4,5],zmm29[4,5,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm29[4,5,4,5]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm27
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm0, %zmm27
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm20
@@ -5327,7 +5327,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 1344(%rdi), %zmm5
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm5, %zmm15
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[0,1,4,5],zmm13[4,5,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm8[4,5,4,5],zmm13[4,5,4,5]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm9
@@ -5337,7 +5337,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm13, %zmm18
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm24, %zmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm6[0,1,4,5],zmm9[4,5,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm6[4,5,4,5],zmm9[4,5,4,5]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm24, %zmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 912(%rdi), %xmm13
@@ -5733,7 +5733,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[0,1,4,5],zmm12[4,5,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[4,5,4,5],zmm12[4,5,4,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm28
@@ -5751,7 +5751,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm14
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm14
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[0,1,4,5],zmm7[4,5,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[4,5,4,5],zmm7[4,5,4,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm9
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm9
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm21
@@ -5766,7 +5766,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1344(%rdi), %zmm7
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm2, %zmm7, %zmm15
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm6[0,1,4,5],zmm13[4,5,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm6[4,5,4,5],zmm13[4,5,4,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm0, %zmm1
@@ -5776,7 +5776,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm13, %zmm16
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm24, %zmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[0,1,4,5],zmm30[4,5,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[4,5,4,5],zmm30[4,5,4,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm24, %zmm10
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 912(%rdi), %xmm13
@@ -6170,7 +6170,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm15, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[0,1,4,5],zmm12[4,5,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[4,5,4,5],zmm12[4,5,4,5]
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm28
@@ -6188,7 +6188,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[0,1,4,5],zmm7[4,5,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[4,5,4,5],zmm7[4,5,4,5]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm9
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm9
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm21
@@ -6203,7 +6203,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1344(%rdi), %zmm7
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm7, %zmm15
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm6[0,1,4,5],zmm13[4,5,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm6[4,5,4,5],zmm13[4,5,4,5]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm0, %zmm1
@@ -6213,7 +6213,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm13, %zmm16
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm24, %zmm6
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[0,1,4,5],zmm30[4,5,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[4,5,4,5],zmm30[4,5,4,5]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm24, %zmm10
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 912(%rdi), %xmm13
@@ -6607,7 +6607,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[0,1,4,5],zmm12[4,5,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[4,5,4,5],zmm12[4,5,4,5]
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm28
@@ -6625,7 +6625,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm14
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[0,1,4,5],zmm7[4,5,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[4,5,4,5],zmm7[4,5,4,5]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm9
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm9
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm21
@@ -6640,7 +6640,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1344(%rdi), %zmm7
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm2, %zmm7, %zmm15
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm6[0,1,4,5],zmm13[4,5,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm6[4,5,4,5],zmm13[4,5,4,5]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm0, %zmm1
@@ -6650,7 +6650,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm13, %zmm16
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm24, %zmm6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[0,1,4,5],zmm30[4,5,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[4,5,4,5],zmm30[4,5,4,5]
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm24, %zmm10
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 912(%rdi), %xmm13
@@ -7044,7 +7044,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,7,14,u>
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm15, %zmm17
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[0,1,4,5],zmm12[4,5,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm14[4,5,4,5],zmm12[4,5,4,5]
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [14,0,0,7,14,0,0,7]
 ; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm28
@@ -7062,7 +7062,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm14
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm14
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[0,1,4,5],zmm7[4,5,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm1[4,5,4,5],zmm7[4,5,4,5]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm9
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm9
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm21
@@ -7077,7 +7077,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 1344(%rdi), %zmm7
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm2, %zmm7, %zmm15
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm6[0,1,4,5],zmm13[4,5,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm6[4,5,4,5],zmm13[4,5,4,5]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm0, %zmm1
@@ -7087,7 +7087,7 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm13, %zmm16
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm24, %zmm6
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[0,1,4,5],zmm30[4,5,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[4,5,4,5],zmm30[4,5,4,5]
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm24, %zmm10
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 912(%rdi), %xmm13
@@ -11331,7 +11331,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q %zmm12, %zmm9, %zmm19
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm11[0,1,4,5],zmm6[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm11[4,5,4,5],zmm6[4,5,4,5]
 ; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [7,0,9,0,7,0,9,0]
 ; AVX512F-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-NEXT:    vmovdqa64 %zmm6, %zmm1
@@ -11344,7 +11344,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 (%rsp), %zmm29 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q %zmm10, %zmm9, %zmm29
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm13[0,1,4,5],zmm18[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm13[4,5,4,5],zmm18[4,5,4,5]
 ; AVX512F-NEXT:    vmovdqa64 %zmm18, %zmm4
 ; AVX512F-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -11353,7 +11353,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm18 # 64-byte Folded Reload
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[0,1,4,5],zmm16[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[4,5,4,5],zmm16[4,5,4,5]
 ; AVX512F-NEXT:    vmovdqa64 %zmm16, %zmm4
 ; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -11363,7 +11363,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q %zmm6, %zmm9, %zmm26
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k1} = zmm0[0,1,4,5],zmm17[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k1} = zmm0[4,5,4,5],zmm17[4,5,4,5]
 ; AVX512F-NEXT:    vmovdqa64 %zmm17, %zmm4
 ; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -11373,7 +11373,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q %zmm22, %zmm9, %zmm31
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm31 {%k1} = zmm7[0,1,4,5],zmm21[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm31 {%k1} = zmm7[4,5,4,5],zmm21[4,5,4,5]
 ; AVX512F-NEXT:    vmovdqa64 %zmm21, %zmm4
 ; AVX512F-NEXT:    vpermt2q %zmm7, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -11383,7 +11383,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
 ; AVX512F-NEXT:    vpermt2q %zmm21, %zmm9, %zmm17
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm0[0,1,4,5],zmm23[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm0[4,5,4,5],zmm23[4,5,4,5]
 ; AVX512F-NEXT:    vmovdqa64 %zmm23, %zmm4
 ; AVX512F-NEXT:    vpermt2q %zmm0, %zmm2, %zmm4
 ; AVX512F-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -11397,7 +11397,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-NEXT:    vpermi2q %zmm14, %zmm0, %zmm9
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k1} = zmm0[0,1,4,5],zmm30[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k1} = zmm0[4,5,4,5],zmm30[4,5,4,5]
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512F-NEXT:    vmovdqa64 %zmm5, %zmm23
 ; AVX512F-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
@@ -11405,7 +11405,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-NEXT:    vpermi2q %zmm0, %zmm30, %zmm2
 ; AVX512F-NEXT:    vpermt2q %zmm30, %zmm3, %zmm0
 ; AVX512F-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k1} = zmm1[0,1,4,5],zmm5[4,5,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm13 {%k1} = zmm1[4,5,4,5],zmm5[4,5,4,5]
 ; AVX512F-NEXT:    vpermt2q %zmm5, %zmm3, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
@@ -12313,7 +12313,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm27
 ; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm27 {%k1} = zmm8[0,1,4,5],zmm26[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm27 {%k1} = zmm8[4,5,4,5],zmm26[4,5,4,5]
 ; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [7,0,9,0,7,0,9,0]
 ; AVX512BW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm0
@@ -12326,7 +12326,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm14, %zmm29
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[0,1,4,5],zmm16[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm29 {%k1} = zmm10[4,5,4,5],zmm16[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm4
 ; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm6, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -12336,7 +12336,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm14, %zmm26
 ; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k1} = zmm13[0,1,4,5],zmm17[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k1} = zmm13[4,5,4,5],zmm17[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
 ; AVX512BW-NEXT:    vpermt2q %zmm13, %zmm6, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -12346,7 +12346,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q %zmm30, %zmm14, %zmm17
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm0[0,1,4,5],zmm15[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm0[4,5,4,5],zmm15[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm4
 ; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -12356,7 +12356,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm14, %zmm16
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm12[0,1,4,5],zmm0[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm12[4,5,4,5],zmm0[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
 ; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -12366,7 +12366,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
 ; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm15
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm0[0,1,4,5],zmm21[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm15 {%k1} = zmm0[4,5,4,5],zmm21[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm4
 ; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
 ; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -12380,7 +12380,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermi2q %zmm21, %zmm0, %zmm14
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm0[0,1,4,5],zmm4[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm0[4,5,4,5],zmm4[4,5,4,5]
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm23
 ; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
@@ -12388,7 +12388,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm4, %zmm6
 ; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm1[0,1,4,5],zmm5[4,5,0,1]
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm1[4,5,4,5],zmm5[4,5,4,5]
 ; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm1
 ; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <9,0,7,u>

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index 4c7b0bcdc11c12..4aee13aae36f93 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -1655,12 +1655,12 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermi2d (%rcx), %ymm7, %ymm8
 ; AVX512F-SLOW-NEXT:    movb $36, %cl
 ; AVX512F-SLOW-NEXT:    kmovw %ecx, %k1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k1} = zmm8[0,1,0,1,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k1} = zmm8[0,1,0,1,2,3,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm1, %zmm6, %zmm7
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm7, %zmm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,0,16>
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,12,28>
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
 ; AVX512F-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
 ; AVX512F-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
@@ -1699,7 +1699,7 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm11 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k1} = zmm11[0,1,2,3,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k1} = zmm11[2,3,2,3,2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm1, %zmm7, %zmm11
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
@@ -1708,7 +1708,7 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm11
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} zmm2 = zmm2[2],zmm3[2],zmm2[3],zmm3[3],zmm2[6],zmm3[6],zmm2[7],zmm3[7],zmm2[10],zmm3[10],zmm2[11],zmm3[11],zmm2[14],zmm3[14],zmm2[15],zmm3[15]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k1} = zmm2[0,1,6,7,6,7,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k1} = zmm2[6,7,6,7,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm1, %zmm11, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
@@ -1744,7 +1744,7 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm1, %zmm7, %zmm8
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm8, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,3,19,u,u,7,23,4,20,u,u,0,16>
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,3,19,u,u,7,23,4,20,u,u,u,u>
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm4, %zmm2, %zmm7
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
 ; AVX512F-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
@@ -1756,7 +1756,8 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm1, %zmm8, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm7, %zmm8
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <2,18,u,u,6,22,7,23,u,u,7,23,2,18,u,u>
+; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
+; AVX512F-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm5, %zmm3, %zmm7
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
 ; AVX512F-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
@@ -1777,7 +1778,7 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm1, %zmm10, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm7, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,0,16>
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,12,28>
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm4, %zmm2, %zmm7
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
 ; AVX512F-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
@@ -1787,7 +1788,8 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm1, %zmm11, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm7, %zmm11
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <2,18,u,u,14,30,15,31,u,u,15,31,2,18,u,u>
+; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
+; AVX512F-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm5, %zmm3, %zmm7
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
 ; AVX512F-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
@@ -1823,12 +1825,12 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    vpermi2d (%rcx), %ymm7, %ymm8
 ; AVX512BW-SLOW-NEXT:    movb $36, %cl
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k1} = zmm8[0,1,0,1,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k1} = zmm8[0,1,0,1,2,3,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm1, %zmm6, %zmm7
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm0, %zmm7, %zmm6
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,0,16>
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,12,28>
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
 ; AVX512BW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
 ; AVX512BW-SLOW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
@@ -1867,7 +1869,7 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm11 = ymm11[2],mem[2],ymm11[3],mem[3],ymm11[6],mem[6],ymm11[7],mem[7]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k1} = zmm11[0,1,2,3,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k1} = zmm11[2,3,2,3,2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm1, %zmm7, %zmm11
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
@@ -1876,7 +1878,7 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm11
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} zmm2 = zmm2[2],zmm3[2],zmm2[3],zmm3[3],zmm2[6],zmm3[6],zmm2[7],zmm3[7],zmm2[10],zmm3[10],zmm2[11],zmm3[11],zmm2[14],zmm3[14],zmm2[15],zmm3[15]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k1} = zmm2[0,1,6,7,6,7,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k1} = zmm2[6,7,6,7,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm1, %zmm11, %zmm2
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
@@ -1912,7 +1914,7 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm1, %zmm7, %zmm8
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,2,3,4,16,6,7,8,9,10,17,12,13,14,15]
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm8, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,3,19,u,u,7,23,4,20,u,u,0,16>
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,3,19,u,u,7,23,4,20,u,u,u,u>
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm4, %zmm2, %zmm7
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
 ; AVX512BW-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
@@ -1924,7 +1926,8 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm1, %zmm8, %zmm7
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm7, %zmm8
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <2,18,u,u,6,22,7,23,u,u,7,23,2,18,u,u>
+; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
+; AVX512BW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm3, %zmm7
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [5,21,0,0,7,23,6,22,5,21,0,0,7,23,6,22]
 ; AVX512BW-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
@@ -1945,7 +1948,7 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm1, %zmm10, %zmm7
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,24,6,7,8,9,10,25,12,13,14,15]
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm7, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,0,16>
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,u,0,16,11,27,u,u,15,31,12,28,u,u,12,28>
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm4, %zmm2, %zmm7
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
 ; AVX512BW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
@@ -1955,7 +1958,8 @@ define void @store_i32_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm1, %zmm11, %zmm7
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,26,2,3,4,5,6,27,8,9,10,11,12,28,14,15]
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm7, %zmm11
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <2,18,u,u,14,30,15,31,u,u,15,31,2,18,u,u>
+; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
+; AVX512BW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm3, %zmm7
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
 ; AVX512BW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
@@ -3902,17 +3906,17 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermt2d (%rcx), %ymm21, %ymm14
 ; AVX512F-SLOW-NEXT:    movb $36, %dl
 ; AVX512F-SLOW-NEXT:    kmovw %edx, %k1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm14[0,1,0,1,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm14[0,1,0,1,2,3,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm6, %zmm14, %zmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm7, %zmm22, %zmm12
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm4, %zmm2, %zmm3
 ; AVX512F-SLOW-NEXT:    vpermt2d 64(%rcx), %ymm21, %ymm20
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm20[0,1,0,1,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm20[0,1,0,1,2,3,6,7]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm9, %zmm14, %zmm3
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm10, %zmm22, %zmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm20
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm8, %zmm14, %zmm20
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
@@ -3959,27 +3963,27 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm21
 ; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm22
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm22 = ymm22[2],mem[2],ymm22[3],mem[3],ymm22[6],mem[6],ymm22[7],mem[7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 {%k1} = zmm22[0,1,2,3,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 {%k1} = zmm22[2,3,2,3,2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm9, %zmm22, %zmm20
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm10, %zmm23, %zmm20
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm5, %zmm1, %zmm14
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm21 = ymm21[2],mem[2],ymm21[3],mem[3],ymm21[6],mem[6],ymm21[7],mem[7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm21[0,1,2,3,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm21[2,3,2,3,2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm6, %zmm22, %zmm14
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm7, %zmm23, %zmm14
 ; AVX512F-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
 ; AVX512F-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm8, %zmm21, %zmm0
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} zmm2 = zmm2[2],zmm4[2],zmm2[3],zmm4[3],zmm2[6],zmm4[6],zmm2[7],zmm4[7],zmm2[10],zmm4[10],zmm2[11],zmm4[11],zmm2[14],zmm4[14],zmm2[15],zmm4[15]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm2[0,1,6,7,6,7,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm2[6,7,6,7,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm10, %zmm4, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm5, %zmm21, %zmm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm19[0,1,6,7,6,7,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm19[6,7,6,7,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm6, %zmm2, %zmm1
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm7, %zmm4, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm18, (%rax)
@@ -4015,14 +4019,15 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm7, %zmm14, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [2,18,3,19,14,30,15,31,14,30,15,31,2,18,3,19]
+; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
+; AVX512F-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm8, %zmm12, %zmm15
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
 ; AVX512F-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm6, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm19
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm17, %zmm19
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
@@ -4043,9 +4048,10 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm22, %zmm20
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm25, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [3,19,0,16,3,19,0,16,7,23,4,20,3,19,0,16]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm24, %zmm25, %zmm26
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [2,18,3,19,6,22,7,23,6,22,7,23,2,18,3,19]
+; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
+; AVX512F-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm9, %zmm3, %zmm6
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm9, %zmm3, %zmm17
 ; AVX512F-FAST-NEXT:    vpermi2d %zmm9, %zmm3, %zmm21
@@ -4175,17 +4181,17 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    vpermt2d (%rcx), %ymm21, %ymm14
 ; AVX512BW-SLOW-NEXT:    movb $36, %dl
 ; AVX512BW-SLOW-NEXT:    kmovd %edx, %k1
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm14[0,1,0,1,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm14[0,1,0,1,2,3,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm6, %zmm14, %zmm12
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,18,2,3,4,5,6,19,8,9,10,11,12,20,14,15]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm7, %zmm22, %zmm12
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm4, %zmm2, %zmm3
 ; AVX512BW-SLOW-NEXT:    vpermt2d 64(%rcx), %ymm21, %ymm20
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm20[0,1,0,1,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k1} = zmm20[0,1,0,1,2,3,6,7]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm9, %zmm14, %zmm3
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm10, %zmm22, %zmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm20
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm8, %zmm14, %zmm20
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
@@ -4232,27 +4238,27 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm21
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm22
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm22 = ymm22[2],mem[2],ymm22[3],mem[3],ymm22[6],mem[6],ymm22[7],mem[7]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 {%k1} = zmm22[0,1,2,3,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 {%k1} = zmm22[2,3,2,3,2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm9, %zmm22, %zmm20
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,21,4,5,6,7,8,22,10,11,12,13,14,23]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm10, %zmm23, %zmm20
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm5, %zmm1, %zmm14
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm21 = ymm21[2],mem[2],ymm21[3],mem[3],ymm21[6],mem[6],ymm21[7],mem[7]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm21[0,1,2,3,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm21[2,3,2,3,2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm6, %zmm22, %zmm14
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm7, %zmm23, %zmm14
 ; AVX512BW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
 ; AVX512BW-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm8, %zmm21, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} zmm2 = zmm2[2],zmm4[2],zmm2[3],zmm4[3],zmm2[6],zmm4[6],zmm2[7],zmm4[7],zmm2[10],zmm4[10],zmm2[11],zmm4[11],zmm2[14],zmm4[14],zmm2[15],zmm4[15]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm2[0,1,6,7,6,7,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm2[6,7,6,7,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,2,29,4,5,6,7,8,30,10,11,12,13,14,31]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm10, %zmm4, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm5, %zmm21, %zmm1
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm19[0,1,6,7,6,7,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm19[6,7,6,7,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm6, %zmm2, %zmm1
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm7, %zmm4, %zmm1
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm18, (%rax)
@@ -4288,14 +4294,15 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm7, %zmm14, %zmm2
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [2,18,3,19,14,30,15,31,14,30,15,31,2,18,3,19]
+; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
+; AVX512BW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm8, %zmm12, %zmm15
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
 ; AVX512BW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm6, %zmm5
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm19
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm17, %zmm19
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [12,28,11,27,0,0,13,29,12,28,11,27,0,0,13,29]
@@ -4316,9 +4323,10 @@ define void @store_i32_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm22, %zmm20
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm25, %zmm4
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [3,19,0,16,3,19,0,16,7,23,4,20,3,19,0,16]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm24, %zmm25, %zmm26
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [2,18,3,19,6,22,7,23,6,22,7,23,2,18,3,19]
+; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
+; AVX512BW-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm9, %zmm3, %zmm6
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm9, %zmm3, %zmm17
 ; AVX512BW-FAST-NEXT:    vpermi2d %zmm9, %zmm3, %zmm21
@@ -8390,7 +8398,7 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm16, %zmm9, %zmm6
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm16, %zmm10, %zmm6
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -8440,22 +8448,22 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermt2d (%rcx), %ymm0, %ymm14
 ; AVX512F-SLOW-NEXT:    movb $36, %al
 ; AVX512F-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm25 {%k1} = zmm14[0,1,0,1,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm25 {%k1} = zmm14[0,1,0,1,2,3,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm19
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm4
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm20, %zmm21, %zmm19
 ; AVX512F-SLOW-NEXT:    vpermt2d 64(%rcx), %ymm0, %ymm13
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm13[0,1,0,1,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm13[0,1,0,1,2,3,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm16
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm17, %zmm21, %zmm16
 ; AVX512F-SLOW-NEXT:    vpermt2d 128(%rcx), %ymm0, %ymm31
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm31[0,1,0,1,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm31[0,1,0,1,2,3,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdx), %ymm14
 ; AVX512F-SLOW-NEXT:    vpermt2d 192(%rcx), %ymm0, %ymm14
 ; AVX512F-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm20
 ; AVX512F-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm31
 ; AVX512F-SLOW-NEXT:    vpermi2d %zmm31, %zmm20, %zmm21
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm14[0,1,0,1,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm14[0,1,0,1,2,3,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%r8), %zmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm12, %zmm14, %zmm25
@@ -8536,16 +8544,16 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm13, %zmm17, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm17
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm17 = ymm17[2],mem[2],ymm17[3],mem[3],ymm17[6],mem[6],ymm17[7],mem[7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm27 {%k1} = zmm17[0,1,2,3,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm27 {%k1} = zmm17[2,3,2,3,2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm17
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm17 = ymm17[2],mem[2],ymm17[3],mem[3],ymm17[6],mem[6],ymm17[7],mem[7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k1} = zmm17[0,1,2,3,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k1} = zmm17[2,3,2,3,2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm17
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm17 = ymm17[2],mem[2],ymm17[3],mem[3],ymm17[6],mem[6],ymm17[7],mem[7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k1} = zmm17[0,1,2,3,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k1} = zmm17[2,3,2,3,2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 192(%rdi), %ymm17
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm17 = ymm17[2],mem[2],ymm17[3],mem[3],ymm17[6],mem[6],ymm17[7],mem[7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 {%k1} = zmm17[0,1,2,3,2,3,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 {%k1} = zmm17[2,3,2,3,2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm12, %zmm17, %zmm27
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm10, %zmm17, %zmm23
@@ -8553,19 +8561,19 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm13, %zmm17, %zmm28
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm17[0,1,6,7,6,7,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm17[6,7,6,7,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm12, %zmm17, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm29[0,1,6,7,6,7,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm29[6,7,6,7,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm10, %zmm17, %zmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm29
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm30 {%k1} = zmm24[0,1,6,7,6,7,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm30 {%k1} = zmm24[6,7,6,7,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm9, %zmm17, %zmm30
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%r9), %zmm9
 ; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} zmm10 = zmm20[2],zmm31[2],zmm20[3],zmm31[3],zmm20[6],zmm31[6],zmm20[7],zmm31[7],zmm20[10],zmm31[10],zmm20[11],zmm31[11],zmm20[14],zmm31[14],zmm20[15],zmm31[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm12
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm10[0,1,6,7,6,7,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm10[6,7,6,7,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm10
 ; AVX512F-SLOW-NEXT:    vpermt2d %zmm13, %zmm17, %zmm18
 ; AVX512F-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm13
@@ -8649,13 +8657,14 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, %zmm21
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm20, %zmm5, %zmm21
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [3,19,0,16,3,19,0,16,7,23,4,20,3,19,0,16]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
 ; AVX512F-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, %zmm28
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm20, %zmm6, %zmm28
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [2,18,3,19,6,22,7,23,6,22,7,23,2,18,3,19]
+; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
+; AVX512F-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, %zmm11
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm20, %zmm6, %zmm11
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -8670,7 +8679,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, %zmm6
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm20, %zmm13, %zmm6
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [2,18,3,19,14,30,15,31,14,30,15,31,2,18,3,19]
+; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
+; AVX512F-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm20, %zmm27, %zmm24
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm25, %zmm5, %zmm6
@@ -8727,7 +8737,7 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm15, %zmm6, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, %zmm22
 ; AVX512F-FAST-NEXT:    vpermt2d %zmm15, %zmm7, %zmm22
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]
@@ -8961,7 +8971,7 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm16, %zmm9, %zmm6
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm16, %zmm10, %zmm6
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -9011,22 +9021,22 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    vpermt2d (%rcx), %ymm0, %ymm14
 ; AVX512BW-SLOW-NEXT:    movb $36, %al
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm25 {%k1} = zmm14[0,1,0,1,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm25 {%k1} = zmm14[0,1,0,1,2,3,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm19
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm4
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm20, %zmm21, %zmm19
 ; AVX512BW-SLOW-NEXT:    vpermt2d 64(%rcx), %ymm0, %ymm13
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm13[0,1,0,1,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 {%k1} = zmm13[0,1,0,1,2,3,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm16
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm17, %zmm21, %zmm16
 ; AVX512BW-SLOW-NEXT:    vpermt2d 128(%rcx), %ymm0, %ymm31
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm31[0,1,0,1,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm31[0,1,0,1,2,3,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa 192(%rdx), %ymm14
 ; AVX512BW-SLOW-NEXT:    vpermt2d 192(%rcx), %ymm0, %ymm14
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm20
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm31
 ; AVX512BW-SLOW-NEXT:    vpermi2d %zmm31, %zmm20, %zmm21
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm14[0,1,0,1,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm14[0,1,0,1,2,3,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm12
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <18,u,2,3,4,5,19,u,8,9,10,11,20,u,14,15>
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm12, %zmm14, %zmm25
@@ -9107,16 +9117,16 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm13, %zmm17, %zmm1
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm17
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm17 = ymm17[2],mem[2],ymm17[3],mem[3],ymm17[6],mem[6],ymm17[7],mem[7]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm27 {%k1} = zmm17[0,1,2,3,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm27 {%k1} = zmm17[2,3,2,3,2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm17
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm17 = ymm17[2],mem[2],ymm17[3],mem[3],ymm17[6],mem[6],ymm17[7],mem[7]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k1} = zmm17[0,1,2,3,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 {%k1} = zmm17[2,3,2,3,2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm17
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm17 = ymm17[2],mem[2],ymm17[3],mem[3],ymm17[6],mem[6],ymm17[7],mem[7]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k1} = zmm17[0,1,2,3,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k1} = zmm17[2,3,2,3,2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 192(%rdi), %ymm17
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} ymm17 = ymm17[2],mem[2],ymm17[3],mem[3],ymm17[6],mem[6],ymm17[7],mem[7]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 {%k1} = zmm17[0,1,2,3,2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 {%k1} = zmm17[2,3,2,3,2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <0,1,21,u,4,5,6,7,22,u,10,11,12,13,23,u>
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm12, %zmm17, %zmm27
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm10, %zmm17, %zmm23
@@ -9124,19 +9134,19 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm13, %zmm17, %zmm28
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm17[0,1,6,7,6,7,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k1} = zmm17[6,7,6,7,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <0,1,29,u,4,5,6,7,30,u,10,11,12,13,31,u>
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm12, %zmm17, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm29[0,1,6,7,6,7,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm29[6,7,6,7,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm10, %zmm17, %zmm12
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm29
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm30 {%k1} = zmm24[0,1,6,7,6,7,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm30 {%k1} = zmm24[6,7,6,7,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm9, %zmm17, %zmm30
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm9
 ; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} zmm10 = zmm20[2],zmm31[2],zmm20[3],zmm31[3],zmm20[6],zmm31[6],zmm20[7],zmm31[7],zmm20[10],zmm31[10],zmm20[11],zmm31[11],zmm20[14],zmm31[14],zmm20[15],zmm31[15]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm12
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm10[0,1,6,7,6,7,0,1]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm10[6,7,6,7,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm10
 ; AVX512BW-SLOW-NEXT:    vpermt2d %zmm13, %zmm17, %zmm18
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm13
@@ -9220,13 +9230,14 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm21
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm20, %zmm5, %zmm21
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [3,19,0,16,3,19,0,16,7,23,4,20,3,19,0,16]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <3,19,0,16,3,19,0,16,7,23,4,20,u,u,u,u>
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [4,20,3,19,0,0,5,21,4,20,3,19,0,0,5,21]
 ; AVX512BW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm28
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm20, %zmm6, %zmm28
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [2,18,3,19,6,22,7,23,6,22,7,23,2,18,3,19]
+; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,22,7,23,6,22,7,23,6,22,7,23,6,22,7,23]
+; AVX512BW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm11
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm20, %zmm6, %zmm11
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -9241,7 +9252,8 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm6
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm20, %zmm13, %zmm6
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [2,18,3,19,14,30,15,31,14,30,15,31,2,18,3,19]
+; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm27 = [14,30,15,31,14,30,15,31,14,30,15,31,14,30,15,31]
+; AVX512BW-FAST-NEXT:    # zmm27 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm20, %zmm27, %zmm24
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm25, %zmm5, %zmm6
@@ -9298,7 +9310,7 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm15, %zmm6, %zmm12
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [3,19,0,16,11,27,8,24,15,31,12,28,3,19,0,16]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [3,19,0,16,11,27,8,24,15,31,12,28,15,31,12,28]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm22
 ; AVX512BW-FAST-NEXT:    vpermt2d %zmm15, %zmm7, %zmm22
 ; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [13,29,0,0,15,31,14,30,13,29,0,0,15,31,14,30]

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
index 6cd85102818512..59857b1a3671ed 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
@@ -822,7 +822,7 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm7[0],ymm12[2],ymm7[2]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $28, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm13[0,1,2,3],zmm4[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm13[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [1,0,10,2,1,0,10,2]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm13
@@ -958,7 +958,7 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm12[0],ymm9[0],ymm12[2],ymm9[2]
 ; AVX512F-ONLY-FAST-NEXT:    movb $28, %sil
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %esi, %k2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm9[0,1,2,3],zmm4[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm9[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [1,0,10,2,1,0,10,2]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
@@ -1035,7 +1035,7 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
 ; AVX512DQ-SLOW-NEXT:    movb $28, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k2
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm11[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm11[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [15,7,15,7,15,7,15,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [9,1,9,1,9,1,9,1]
@@ -1170,7 +1170,7 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
 ; AVX512DQ-FAST-NEXT:    movb $28, %sil
 ; AVX512DQ-FAST-NEXT:    kmovw %esi, %k2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm10[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm10[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [15,7,15,7,15,7,15,7]
 ; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [9,1,9,1,9,1,9,1]
@@ -1316,7 +1316,7 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm7[0],ymm12[2],ymm7[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $28, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm13[0,1,2,3],zmm4[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm13[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [1,0,10,2,1,0,10,2]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm13
@@ -1452,7 +1452,7 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm9 = ymm12[0],ymm9[0],ymm12[2],ymm9[2]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $28, %sil
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %esi, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm9[0,1,2,3],zmm4[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm9[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [1,0,10,2,1,0,10,2]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm1, %zmm9
@@ -1529,7 +1529,7 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
 ; AVX512DQBW-SLOW-NEXT:    movb $28, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm11[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm11[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [15,7,15,7,15,7,15,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [9,1,9,1,9,1,9,1]
@@ -1664,7 +1664,7 @@ define void @store_i64_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
 ; AVX512DQBW-FAST-NEXT:    movb $28, %sil
 ; AVX512DQBW-FAST-NEXT:    kmovd %esi, %k2
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm10[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 {%k2} = zmm10[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [15,7,15,7,15,7,15,7]
 ; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm11 = [9,1,9,1,9,1,9,1]
@@ -2595,7 +2595,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm20 = ymm19[0],ymm21[0],ymm19[2],ymm21[2]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $28, %sil
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %esi, %k3
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k3} = zmm20[0,1,2,3],zmm23[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k3} = zmm20[2,3,2,3],zmm23[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [6,13,14,7,6,13,14,7]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm26, %zmm22, %zmm20
@@ -2609,7 +2609,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm30 = ymm29[0],ymm31[0],ymm29[2],ymm31[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k3} = zmm30[0,1,2,3],zmm22[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k3} = zmm30[2,3,2,3],zmm22[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [4,9,0,3,4,9,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm16
@@ -2867,14 +2867,14 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movb $28, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm5[0,1,2,3],zmm10[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm5[2,3,2,3],zmm10[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,0,10,2,1,0,10,2]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm5, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm11
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm11
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm8[0],ymm22[0],ymm8[2],ymm22[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm5[0,1,2,3],zmm13[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm5[2,3,2,3],zmm13[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    movb $6, %al
@@ -3021,7 +3021,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm20 = ymm19[0],ymm21[0],ymm19[2],ymm21[2]
 ; AVX512DQ-SLOW-NEXT:    movb $28, %sil
 ; AVX512DQ-SLOW-NEXT:    kmovw %esi, %k3
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k3} = zmm20[0,1,2,3],zmm23[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k3} = zmm20[2,3,2,3],zmm23[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [6,13,14,7,6,13,14,7]
 ; AVX512DQ-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm26, %zmm22, %zmm20
@@ -3036,7 +3036,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm18
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm18 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm30 = ymm29[0],ymm31[0],ymm29[2],ymm31[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k3} = zmm30[0,1,2,3],zmm22[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k3} = zmm30[2,3,2,3],zmm22[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [4,9,0,3,4,9,0,3]
 ; AVX512DQ-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm15
@@ -3290,14 +3290,14 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k1}
 ; AVX512DQ-FAST-NEXT:    movb $28, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm5[0,1,2,3],zmm10[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm5[2,3,2,3],zmm10[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,0,10,2,1,0,10,2]
 ; AVX512DQ-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm5, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm11
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm11
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm7[0],ymm21[0],ymm7[2],ymm21[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm5[0,1,2,3],zmm13[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm5[2,3,2,3],zmm13[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm5
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    movb $6, %al
@@ -3443,7 +3443,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm20 = ymm19[0],ymm21[0],ymm19[2],ymm21[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $28, %sil
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %esi, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k3} = zmm20[0,1,2,3],zmm23[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k3} = zmm20[2,3,2,3],zmm23[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [6,13,14,7,6,13,14,7]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm26, %zmm22, %zmm20
@@ -3457,7 +3457,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm17
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm30 = ymm29[0],ymm31[0],ymm29[2],ymm31[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k3} = zmm30[0,1,2,3],zmm22[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k3} = zmm30[2,3,2,3],zmm22[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [4,9,0,3,4,9,0,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm16
@@ -3715,14 +3715,14 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm1 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movb $28, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm5[0,1,2,3],zmm10[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm5[2,3,2,3],zmm10[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,0,10,2,1,0,10,2]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm5, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm11
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm11
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm8[0],ymm22[0],ymm8[2],ymm22[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm5[0,1,2,3],zmm13[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm5[2,3,2,3],zmm13[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm5
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $6, %al
@@ -3869,7 +3869,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm20 = ymm19[0],ymm21[0],ymm19[2],ymm21[2]
 ; AVX512DQBW-SLOW-NEXT:    movb $28, %sil
 ; AVX512DQBW-SLOW-NEXT:    kmovd %esi, %k3
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k3} = zmm20[0,1,2,3],zmm23[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k3} = zmm20[2,3,2,3],zmm23[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm20 = [6,13,14,7,6,13,14,7]
 ; AVX512DQBW-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm26, %zmm22, %zmm20
@@ -3884,7 +3884,7 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm18
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm18 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm30 = ymm29[0],ymm31[0],ymm29[2],ymm31[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k3} = zmm30[0,1,2,3],zmm22[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k3} = zmm30[2,3,2,3],zmm22[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [4,9,0,3,4,9,0,3]
 ; AVX512DQBW-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm30, %zmm15
@@ -4138,14 +4138,14 @@ define void @store_i64_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm1 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movb $28, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm5[0,1,2,3],zmm10[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k1} = zmm5[2,3,2,3],zmm10[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,0,10,2,1,0,10,2]
 ; AVX512DQBW-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm5, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm11
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm11
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm5 = ymm7[0],ymm21[0],ymm7[2],ymm21[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm5[0,1,2,3],zmm13[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm5[2,3,2,3],zmm13[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vpbroadcastq 72(%rcx), %ymm5
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = mem[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512DQBW-FAST-NEXT:    movb $6, %al
@@ -5816,7 +5816,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm17[0],ymm1[2],ymm17[2]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $28, %r10b
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm1[0,1,2,3],zmm5[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm1[2,3,2,3],zmm5[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [4,12,0,5,4,12,0,5]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
@@ -5851,7 +5851,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm20, %zmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm19[0],ymm16[0],ymm19[2],ymm16[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[0,1,2,3],zmm6[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm19
@@ -5890,7 +5890,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %ymm16
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm16[0],ymm0[0],ymm16[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm0[0,1,2,3],zmm18[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm0[2,3,2,3],zmm18[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm6
@@ -6127,7 +6127,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm16, %zmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %ymm16
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm16 = ymm16[0],mem[0],ymm16[2],mem[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm16[0,1,2,3],zmm4[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm16[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm22
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm14 {%k3}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -6259,7 +6259,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm1[0],ymm19[2],ymm1[2]
 ; AVX512F-ONLY-FAST-NEXT:    movb $28, %r10b
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm3[0,1,2,3],zmm4[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm3[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [1,3,7,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %ymm1, %ymm28, %ymm19
@@ -6303,7 +6303,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm16, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm1[0,1,2,3],zmm14[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm1[2,3,2,3],zmm14[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm23
@@ -6336,7 +6336,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm22
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm26 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k2} = zmm3[0,1,2,3],zmm12[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k2} = zmm3[2,3,2,3],zmm12[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm20
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm20, %zmm1, %zmm19
@@ -6591,7 +6591,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm20, %zmm10, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm10[0],mem[0],ymm10[2],mem[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm10[0,1,2,3],zmm8[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm10[2,3,2,3],zmm8[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    movb $64, %al
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
@@ -6703,7 +6703,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm16[0],ymm1[2],ymm16[2]
 ; AVX512DQ-SLOW-NEXT:    movb $28, %r10b
 ; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm1[0,1,2,3],zmm3[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm1[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [4,12,0,5,4,12,0,5]
 ; AVX512DQ-SLOW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
@@ -6738,7 +6738,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm20, %zmm5
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm19[0],ymm17[0],ymm19[2],ymm17[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[0,1,2,3],zmm6[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm21
@@ -6779,7 +6779,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %ymm18
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm18[0],ymm0[0],ymm18[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm0[0,1,2,3],zmm20[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm0[2,3,2,3],zmm20[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm6
@@ -7011,7 +7011,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm21, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %ymm21
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm21 = ymm21[0],mem[0],ymm21[2],mem[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm21[0,1,2,3],zmm3[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm21[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm10 {%k3}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7144,7 +7144,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm16[0],ymm1[0],ymm16[2],ymm1[2]
 ; AVX512DQ-FAST-NEXT:    movb $28, %r10b
 ; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm2[0,1,2,3],zmm7[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm2[2,3,2,3],zmm7[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [1,3,7,7]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %ymm1, %ymm27, %ymm16
@@ -7187,7 +7187,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm13, %zmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm13
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm1[0,1,2,3],zmm13[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm1[2,3,2,3],zmm13[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm4
@@ -7220,7 +7220,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rax), %zmm12
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[0,1,2,3],zmm12[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm12[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm1
@@ -7468,7 +7468,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm9, %zmm12, %zmm13
 ; AVX512DQ-FAST-NEXT:    vmovdqa 192(%r8), %ymm12
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm12[0],mem[0],ymm12[2],mem[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k2} = zmm12[0,1,2,3],zmm5[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k2} = zmm12[2,3,2,3],zmm5[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    movb $64, %al
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
@@ -7581,7 +7581,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm17[0],ymm1[2],ymm17[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $28, %r10b
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm1[0,1,2,3],zmm5[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 {%k2} = zmm1[2,3,2,3],zmm5[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [4,12,0,5,4,12,0,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
@@ -7616,7 +7616,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm20, %zmm5
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm19[0],ymm16[0],ymm19[2],ymm16[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[0,1,2,3],zmm6[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm19
@@ -7655,7 +7655,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %ymm16
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm16[0],ymm0[0],ymm16[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm0[0,1,2,3],zmm18[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm0[2,3,2,3],zmm18[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm6
@@ -7892,7 +7892,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm16, %zmm18
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %ymm16
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm16 = ymm16[0],mem[0],ymm16[2],mem[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm16[0,1,2,3],zmm4[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm16[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm22
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm14 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -8024,7 +8024,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm1[0],ymm19[2],ymm1[2]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $28, %r10b
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm3[0,1,2,3],zmm4[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 {%k2} = zmm3[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [1,3,7,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %ymm1, %ymm28, %ymm19
@@ -8068,7 +8068,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm16, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm1[0,1,2,3],zmm14[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm1[2,3,2,3],zmm14[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm23
@@ -8101,7 +8101,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm22
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm26 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k2} = zmm3[0,1,2,3],zmm12[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k2} = zmm3[2,3,2,3],zmm12[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm20
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm20, %zmm1, %zmm19
@@ -8356,7 +8356,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm20, %zmm10, %zmm12
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm10
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm10[0],mem[0],ymm10[2],mem[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm10[0,1,2,3],zmm8[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm10[2,3,2,3],zmm8[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $64, %al
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm9 {%k1}
@@ -8468,7 +8468,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm16[0],ymm1[2],ymm16[2]
 ; AVX512DQBW-SLOW-NEXT:    movb $28, %r10b
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm1[0,1,2,3],zmm3[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm1[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [4,12,0,5,4,12,0,5]
 ; AVX512DQBW-SLOW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
@@ -8503,7 +8503,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm20, %zmm5
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm19[0],ymm17[0],ymm19[2],ymm17[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[0,1,2,3],zmm6[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm0[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm21
@@ -8544,7 +8544,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %ymm18
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm18[0],ymm0[0],ymm18[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm0[0,1,2,3],zmm20[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm0[2,3,2,3],zmm20[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm6
@@ -8776,7 +8776,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm21, %zmm20
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %ymm21
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm21 = ymm21[0],mem[0],ymm21[2],mem[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm21[0,1,2,3],zmm3[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm21[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm21
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm10 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -8909,7 +8909,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm16[0],ymm1[0],ymm16[2],ymm1[2]
 ; AVX512DQBW-FAST-NEXT:    movb $28, %r10b
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm2[0,1,2,3],zmm7[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm2[2,3,2,3],zmm7[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [1,3,7,7]
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %ymm1, %ymm27, %ymm16
@@ -8952,7 +8952,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm13, %zmm4
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rax), %zmm13
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm1[0,1,2,3],zmm13[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 {%k2} = zmm1[2,3,2,3],zmm13[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm4
@@ -8985,7 +8985,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm20
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rax), %zmm12
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[0,1,2,3],zmm12[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm0[2,3,2,3],zmm12[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm1
@@ -9233,7 +9233,7 @@ define void @store_i64_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm9, %zmm12, %zmm13
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%r8), %ymm12
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm12[0],mem[0],ymm12[2],mem[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k2} = zmm12[0,1,2,3],zmm5[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k2} = zmm12[2,3,2,3],zmm5[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    movb $64, %al
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
@@ -12671,7 +12671,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm10[0],ymm1[2],ymm10[2]
 ; AVX512F-ONLY-SLOW-NEXT:    movb $28, %r10b
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[0,1,2,3],zmm6[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [4,12,0,5,4,12,0,5]
 ; AVX512F-ONLY-SLOW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
@@ -12706,7 +12706,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm19, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm13[0],ymm11[0],ymm13[2],ymm11[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm8[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm8[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm6
@@ -12742,7 +12742,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm8
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm11[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm11[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm8
@@ -12780,7 +12780,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%r8), %ymm11
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm11[0],ymm1[0],ymm11[2],ymm1[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[0,1,2,3],zmm25[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[2,3,2,3],zmm25[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm14
@@ -12818,7 +12818,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %ymm25
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm25[0],ymm1[0],ymm25[2],ymm1[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[0,1,2,3],zmm0[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm12
@@ -12862,7 +12862,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%r8), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k2} = zmm1[0,1,2,3],zmm2[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k2} = zmm1[2,3,2,3],zmm2[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm11
@@ -13203,7 +13203,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%r8), %ymm7
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm7[0],ymm4[0],ymm7[2],ymm4[2]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm4[0,1,2,3],zmm2[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm4[2,3,2,3],zmm2[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm21
@@ -13617,7 +13617,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm27[0],ymm1[0],ymm27[2],ymm1[2]
 ; AVX512F-ONLY-FAST-NEXT:    movb $28, %r10b
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[0,1,2,3],zmm3[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,12,0,5,4,12,0,5]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
@@ -13653,7 +13653,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm21, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm25[0],ymm6[0],ymm25[2],ymm6[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm4[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
@@ -13691,7 +13691,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%r8), %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm4[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm3
@@ -13730,7 +13730,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm14
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm14[0],ymm18[0],ymm14[2],ymm18[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[0,1,2,3],zmm5[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[2,3,2,3],zmm5[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm4
@@ -13766,7 +13766,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%r9), %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %ymm19
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm10[0],ymm19[2],ymm10[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm3[0,1,2,3],zmm30[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm3[2,3,2,3],zmm30[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm0
@@ -13810,7 +13810,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%r9), %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%r8), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm4[0,1,2,3],zmm15[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm4[2,3,2,3],zmm15[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm2
@@ -14198,7 +14198,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[0,1,2,3],zmm1[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm15
@@ -14575,7 +14575,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm10[0],ymm1[2],ymm10[2]
 ; AVX512DQ-SLOW-NEXT:    movb $28, %r10b
 ; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[0,1,2,3],zmm6[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
 ; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
@@ -14611,7 +14611,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm13[0],ymm11[0],ymm13[2],ymm11[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm8[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm8[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm6
@@ -14649,7 +14649,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%r8), %ymm8
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm11[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm11[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm8
@@ -14686,7 +14686,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%r8), %ymm11
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm11[0],ymm1[0],ymm11[2],ymm1[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[0,1,2,3],zmm26[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[2,3,2,3],zmm26[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm27
@@ -14722,7 +14722,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r8), %ymm28
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm28[0],ymm1[0],ymm28[2],ymm1[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k2} = zmm1[0,1,2,3],zmm0[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k2} = zmm1[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm8
@@ -14766,7 +14766,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%r8), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm1[0,1,2,3],zmm17[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm1[2,3,2,3],zmm17[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm14
@@ -15109,7 +15109,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%r8), %ymm6
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm6[0],ymm13[0],ymm6[2],ymm13[2]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm6[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm6[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm21
@@ -15519,7 +15519,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm29[0],ymm1[0],ymm29[2],ymm1[2]
 ; AVX512DQ-FAST-NEXT:    movb $28, %r10b
 ; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[0,1,2,3],zmm3[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [4,12,0,5,4,12,0,5]
 ; AVX512DQ-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
@@ -15556,7 +15556,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm12, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm27[0],ymm8[0],ymm27[2],ymm8[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
@@ -15594,7 +15594,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %ymm28
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %ymm23
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm23[0],ymm28[0],ymm23[2],ymm28[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm3
@@ -15631,7 +15631,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %ymm24
 ; AVX512DQ-FAST-NEXT:    vmovdqa 192(%r8), %ymm13
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm13[0],ymm24[0],ymm13[2],ymm24[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[0,1,2,3],zmm5[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[2,3,2,3],zmm5[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r8), %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %zmm21
@@ -15668,7 +15668,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa 256(%r9), %ymm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r8), %ymm22
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm22[0],ymm12[0],ymm22[2],ymm12[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm3[0,1,2,3],zmm1[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm3[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r8), %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r9), %zmm2
@@ -15711,7 +15711,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa 320(%r9), %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa 320(%r8), %ymm1
 ; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm4[0,1,2,3],zmm10[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm4[2,3,2,3],zmm10[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r8), %zmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r9), %zmm9
@@ -16099,7 +16099,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[0,1,2,3],zmm1[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm16
@@ -16470,7 +16470,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm10[0],ymm1[2],ymm10[2]
 ; AVX512BW-ONLY-SLOW-NEXT:    movb $28, %r10b
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[0,1,2,3],zmm6[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm22 = [4,12,0,5,4,12,0,5]
 ; AVX512BW-ONLY-SLOW-NEXT:    # zmm22 = mem[0,1,2,3,0,1,2,3]
@@ -16505,7 +16505,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm19, %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm13[0],ymm11[0],ymm13[2],ymm11[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm8[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm8[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm6
@@ -16541,7 +16541,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%r8), %ymm8
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm11[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm11[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm8
@@ -16579,7 +16579,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%r8), %ymm11
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm11[0],ymm1[0],ymm11[2],ymm1[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[0,1,2,3],zmm25[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[2,3,2,3],zmm25[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm14
@@ -16617,7 +16617,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %ymm25
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm25[0],ymm1[0],ymm25[2],ymm1[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[0,1,2,3],zmm0[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm12
@@ -16661,7 +16661,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%r8), %ymm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k2} = zmm1[0,1,2,3],zmm2[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 {%k2} = zmm1[2,3,2,3],zmm2[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm11
@@ -17002,7 +17002,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%r8), %ymm7
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm7[0],ymm4[0],ymm7[2],ymm4[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm4[0,1,2,3],zmm2[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 {%k2} = zmm4[2,3,2,3],zmm2[2,3,2,3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm21
@@ -17416,7 +17416,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm27[0],ymm1[0],ymm27[2],ymm1[2]
 ; AVX512BW-ONLY-FAST-NEXT:    movb $28, %r10b
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[0,1,2,3],zmm3[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [4,12,0,5,4,12,0,5]
 ; AVX512BW-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
@@ -17452,7 +17452,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm21, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm25[0],ymm6[0],ymm25[2],ymm6[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm4[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
@@ -17490,7 +17490,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%r8), %ymm3
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[2],ymm0[2]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm4[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm3
@@ -17529,7 +17529,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %ymm18
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%r8), %ymm14
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm14[0],ymm18[0],ymm14[2],ymm18[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[0,1,2,3],zmm5[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[2,3,2,3],zmm5[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm4
@@ -17565,7 +17565,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%r9), %ymm10
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %ymm19
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm19[0],ymm10[0],ymm19[2],ymm10[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm3[0,1,2,3],zmm30[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 {%k2} = zmm3[2,3,2,3],zmm30[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm0
@@ -17609,7 +17609,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%r9), %ymm3
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%r8), %ymm1
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm4[0,1,2,3],zmm15[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm4[2,3,2,3],zmm15[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm2
@@ -17997,7 +17997,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[0,1,2,3],zmm1[2,3,0,1]
+; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm15
@@ -18374,7 +18374,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm10[0],ymm1[2],ymm10[2]
 ; AVX512DQBW-SLOW-NEXT:    movb $28, %r10b
 ; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[0,1,2,3],zmm6[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm6[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [4,12,0,5,4,12,0,5]
 ; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
@@ -18410,7 +18410,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm13[0],ymm11[0],ymm13[2],ymm11[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm8[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm8[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm6
@@ -18448,7 +18448,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%r8), %ymm8
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm11[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm11[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm8
@@ -18485,7 +18485,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%r8), %ymm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm11[0],ymm1[0],ymm11[2],ymm1[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[0,1,2,3],zmm26[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm1[2,3,2,3],zmm26[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm27
@@ -18521,7 +18521,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r8), %ymm28
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm28[0],ymm1[0],ymm28[2],ymm1[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k2} = zmm1[0,1,2,3],zmm0[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 {%k2} = zmm1[2,3,2,3],zmm0[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm8
@@ -18565,7 +18565,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%r8), %ymm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm1[0,1,2,3],zmm17[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm14 {%k2} = zmm1[2,3,2,3],zmm17[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm14
@@ -18908,7 +18908,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%r8), %ymm6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm6 = ymm6[0],ymm13[0],ymm6[2],ymm13[2]
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm6[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 {%k2} = zmm6[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm21
@@ -19318,7 +19318,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm29[0],ymm1[0],ymm29[2],ymm1[2]
 ; AVX512DQBW-FAST-NEXT:    movb $28, %r10b
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[0,1,2,3],zmm3[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 {%k2} = zmm1[2,3,2,3],zmm3[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [4,12,0,5,4,12,0,5]
 ; AVX512DQBW-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
@@ -19355,7 +19355,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm12, %zmm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm27[0],ymm8[0],ymm27[2],ymm8[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm3
@@ -19393,7 +19393,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %ymm28
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %ymm23
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm23[0],ymm28[0],ymm23[2],ymm28[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[0,1,2,3],zmm4[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 {%k2} = zmm0[2,3,2,3],zmm4[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm3
@@ -19430,7 +19430,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %ymm24
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%r8), %ymm13
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm1 = ymm13[0],ymm24[0],ymm13[2],ymm24[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[0,1,2,3],zmm5[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm1[2,3,2,3],zmm5[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r8), %zmm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm21
@@ -19467,7 +19467,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%r9), %ymm12
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r8), %ymm22
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm22[0],ymm12[0],ymm22[2],ymm12[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm3[0,1,2,3],zmm1[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm3[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r8), %zmm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r9), %zmm2
@@ -19510,7 +19510,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%r9), %ymm3
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%r8), %ymm1
 ; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm4[0,1,2,3],zmm10[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 {%k2} = zmm4[2,3,2,3],zmm10[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r8), %zmm4
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r9), %zmm9
@@ -19898,7 +19898,7 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[0,1,2,3],zmm1[2,3,0,1]
+; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 {%k2} = zmm0[2,3,2,3],zmm1[2,3,2,3]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm16

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
index f0cdfc9beb0fcf..6fd61d2a5dfa50 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -867,7 +867,7 @@ define void @ispc_1864(ptr %arg) {
 ; ALL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [-5.0E+0,-5.0E+0,-5.0E+0,-5.0E+0,-5.0E+0,-5.0E+0,-5.0E+0,-5.0E+0]
 ; ALL-NEXT:    vmulps 32(%rdi), %ymm0, %ymm0
 ; ALL-NEXT:    vcvtps2pd %ymm0, %zmm0
-; ALL-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,4,5,0,1,0,1]
+; ALL-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,4,5,4,5,6,7]
 ; ALL-NEXT:    vmovapd %ymm0, {{[0-9]+}}(%rsp)
 ; ALL-NEXT:    movq %rbp, %rsp
 ; ALL-NEXT:    popq %rbp

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
index de66f67cf71d32..b54073a5de7128 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -1849,7 +1849,7 @@ define <8 x double> @shuffle_v8f64_23014567(<8 x double> %a0, <8 x double> %a1)
 define <8 x double> @shuffle_v8f64_2301uu67(<8 x double> %a0, <8 x double> %a1) {
 ; ALL-LABEL: shuffle_v8f64_2301uu67:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm1[2,3,0,1,0,1,6,7]
+; ALL-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm1[2,3,0,1,4,5,6,7]
 ; ALL-NEXT:    ret{{[l|q]}}
   %1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 undef, i32 undef, i32 6, i32 7>
   ret <8 x double> %1
@@ -1867,7 +1867,7 @@ define <8 x double> @shuffle_v8f64_2301uuuu(<8 x double> %a0, <8 x double> %a1)
 define <8 x double> @shuffle_v8f64_uuu2301(<8 x double> %a0, <8 x double> %a1) {
 ; ALL-LABEL: shuffle_v8f64_uuu2301:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1,2,3,0,1]
+; ALL-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3,2,3,0,1]
 ; ALL-NEXT:    ret{{[l|q]}}
   %1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 3, i32 0, i32 1>
   ret <8 x double> %1

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
index ec0bddcc749b29..809d94b649fb48 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -604,7 +604,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    kmovw %edi, %k1
 ; AVX512F-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,4,5,6,7]
 ; AVX512F-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; AVX512F-NEXT:    vptestmq %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax


        

