[llvm] 2869053 - [X86] vector-interleaved tests - add AVX512F/AVX512DQ/AVX512BW/AVX512DQBW-ONLY common prefixes to merge more SLOW/FAST checks

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 27 07:06:56 PST 2023


Author: Simon Pilgrim
Date: 2023-11-27T15:06:24Z
New Revision: 286905351f5b0f520ec4ed6081f7008a0b292aa5

URL: https://github.com/llvm/llvm-project/commit/286905351f5b0f520ec4ed6081f7008a0b292aa5
DIFF: https://github.com/llvm/llvm-project/commit/286905351f5b0f520ec4ed6081f7008a0b292aa5.diff

LOG: [X86] vector-interleaved tests - add AVX512F/AVX512DQ/AVX512BW/AVX512DQBW-ONLY common prefixes to merge more SLOW/FAST checks

These common prefixes aren't used by many of the vector-interleaved tests yet, but it's a LOT easier to maintain the tests if we use the same set of prefixes for all of them.
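
For example, with a shared AVX512BW-ONLY prefix added to both the slow- and
fast-shuffle RUN lines (a minimal sketch of the pattern used in this patch,
not any one test verbatim):

    ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512BW-ONLY,AVX512BW-ONLY-SLOW
    ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW-ONLY,AVX512BW-ONLY-FAST

utils/update_llc_test_checks.py can then emit a single AVX512BW-ONLY check
body for any function whose codegen is identical under both feature sets
(as in the load_i16_stride8_vf64 hunk below, where the duplicated
AVX512BW-ONLY-SLOW block collapses into one AVX512BW-ONLY block), instead
of maintaining the same assembly twice under the -SLOW and -FAST prefixes.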

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-2.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
index 900847c8a191fe1..eeea912a56a69ae 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-2.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -968,14 +968,18 @@ define void @load_i16_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
index 1b47c0f3193d3a9..8aed554ff58f540 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -2347,12 +2347,16 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1: {{.*}}
 ; AVX2: {{.*}}
 ; AVX512: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
index 5a7abf8cdf9fbfe..8eb266876004046 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -5425,14 +5425,18 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2: {{.*}}
 ; AVX2-ONLY: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index 8cfd55b3044f569..f5399e791f6e843 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -7728,14 +7728,18 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY: {{.*}}
 ; AVX512: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index 3decaa366af785f..77207838c2d047c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -11880,12 +11880,16 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY: {{.*}}
 ; AVX512: {{.*}}
 ; AVX512-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index 720e3464ead2007..9a3bbf1d416cc05 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -16175,12 +16175,16 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY: {{.*}}
 ; AVX512: {{.*}}
 ; AVX512-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
index 47f5ac1c5fc4065..e3d60d0a4dc1eb8 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -8744,1129 +8744,567 @@ define void @load_i16_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
-; AVX512BW-ONLY-SLOW-LABEL: load_i16_stride8_vf64:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm20, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    movb $-64, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm9, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm17, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm21, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm22, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm20, %zmm4, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm9, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm17, %zmm4, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm22, %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm22, %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm22, %zmm15, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm22, %zmm13, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm22, %zmm1, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm20, %zmm0, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm20, %zmm3, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm20, %zmm10, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm20, %zmm15, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm20, %zmm13, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm20, %zmm1, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm17, %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm17, %zmm3, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm17, %zmm10, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm17, %zmm15, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm17, %zmm13, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm17, %zmm1, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm21, %zmm4, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm21, %zmm0, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm21, %zmm3, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm21, %zmm10, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm21, %zmm15, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm21, %zmm13, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm21, %zmm1, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm4, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm0, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm6, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm10, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm15, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm13, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm1, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm28, %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm3, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm6, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm9, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm12, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm5, %zmm19, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2w %zmm28, %zmm19, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512BW-ONLY-SLOW-NEXT:    retq
+; AVX512BW-ONLY-LABEL: load_i16_stride8_vf64:
+; AVX512BW-ONLY:       # %bb.0:
+; AVX512BW-ONLY-NEXT:    subq $1096, %rsp # imm = 0x448
+; AVX512BW-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 576(%rdi), %zmm28
+; AVX512BW-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512BW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm20, %zmm0, %zmm2
+; AVX512BW-ONLY-NEXT:    movb $-64, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512BW-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm9, %zmm1, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm17, %zmm0, %zmm4
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm21, %zmm0, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm6, %zmm8, %zmm1
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm22, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm20, %zmm4, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-ONLY-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm9, %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm17, %zmm4, %zmm2
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512BW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512BW-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm22, %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512BW-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm22, %zmm10, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512BW-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm22, %zmm15, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
+; AVX512BW-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm22, %zmm13, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512BW-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm22, %zmm1, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm20, %zmm0, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm20, %zmm3, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm31
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm20, %zmm10, %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm20, %zmm15, %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm20, %zmm13, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm20, %zmm1, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm17, %zmm0, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm17, %zmm3, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm17, %zmm10, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm17, %zmm15, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm17, %zmm13, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm17, %zmm1, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm21, %zmm4, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm19
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm21, %zmm0, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm21, %zmm3, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm21, %zmm10, %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm21, %zmm15, %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm21, %zmm13, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm21, %zmm1, %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm4, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm0, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm6, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm21
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm10, %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm15, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm13, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm1, %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm13
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm28, %zmm1, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512BW-ONLY-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm9, %zmm0
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512BW-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm3, %zmm9
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512BW-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm6, %zmm8
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm9, %zmm11
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
+; AVX512BW-ONLY-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm12, %zmm14
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512BW-ONLY-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm5, %zmm19, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2w %zmm28, %zmm19, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 64(%rsi)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, (%rsi)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, (%rdx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%r8)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, (%r8)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, 64(%r9)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512BW-ONLY-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512BW-ONLY-NEXT:    vzeroupper
+; AVX512BW-ONLY-NEXT:    retq
 ;
-; AVX512BW-ONLY-FAST-LABEL: load_i16_stride8_vf64:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm20, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    movb $-64, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm9, %zmm1, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm17, %zmm0, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm21, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm6, %zmm8, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm22, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm20, %zmm4, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm9, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm17, %zmm4, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm22, %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm22, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm22, %zmm15, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm22, %zmm13, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm22, %zmm1, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm20, %zmm0, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm20, %zmm3, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm20, %zmm10, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm20, %zmm15, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm20, %zmm13, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm20, %zmm1, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm17, %zmm0, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm17, %zmm3, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm17, %zmm10, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm17, %zmm15, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm17, %zmm13, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm17, %zmm1, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm21, %zmm4, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm21, %zmm0, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm21, %zmm3, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm21, %zmm10, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm21, %zmm15, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm21, %zmm13, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm21, %zmm1, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm4, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm0, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm6, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm10, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm15, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm13, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm1, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm28, %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm9, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm3, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm6, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm9, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm12, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm5, %zmm19, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2w %zmm28, %zmm19, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-SLOW-LABEL: load_i16_stride8_vf64:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm20, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    movb $-64, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm9, %zmm1, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm17, %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm21, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm6, %zmm8, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm22, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm20, %zmm4, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512DQBW-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm9, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm17, %zmm4, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm22, %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512DQBW-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm22, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512DQBW-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm22, %zmm15, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm22, %zmm13, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm22, %zmm1, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm20, %zmm0, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm20, %zmm3, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm20, %zmm10, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm20, %zmm15, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm20, %zmm13, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm20, %zmm1, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm17, %zmm0, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm17, %zmm3, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm17, %zmm10, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm17, %zmm15, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm17, %zmm13, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm17, %zmm1, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm21, %zmm4, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm21, %zmm0, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm21, %zmm3, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm21, %zmm10, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm21, %zmm15, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm21, %zmm13, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm21, %zmm1, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm4, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm0, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm6, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm10, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm15, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm13, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm1, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm0, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm28, %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512DQBW-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512DQBW-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm3, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512DQBW-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm6, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm9, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512DQBW-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm12, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512DQBW-SLOW-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm5, %zmm19, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm28, %zmm2, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2w %zmm28, %zmm19, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512DQBW-SLOW-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512DQBW-SLOW-NEXT:    vzeroupper
-; AVX512DQBW-SLOW-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: load_i16_stride8_vf64:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm20, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    movb $-64, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
-; AVX512DQBW-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm9, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm17, %zmm0, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm21, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm6, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm22, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm20, %zmm4, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512DQBW-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm9, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm17, %zmm4, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm22, %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm22, %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512DQBW-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm22, %zmm15, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm22, %zmm13, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm22, %zmm1, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm20, %zmm0, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm20, %zmm3, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm20, %zmm10, %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm20, %zmm15, %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm20, %zmm13, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm20, %zmm1, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm17, %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm17, %zmm3, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm17, %zmm10, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm17, %zmm15, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm17, %zmm13, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm17, %zmm1, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm21, %zmm4, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm21, %zmm0, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm21, %zmm3, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm21, %zmm10, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm21, %zmm15, %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm21, %zmm13, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm21, %zmm1, %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm4, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm0, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm6, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm10, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm15, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm13, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm1, %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm0, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm28, %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
-; AVX512DQBW-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
-; AVX512DQBW-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm3, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
-; AVX512DQBW-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm6, %zmm8
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm9, %zmm11
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
-; AVX512DQBW-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm12, %zmm14
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
-; AVX512DQBW-FAST-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm5, %zmm19, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm28, %zmm2, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2w %zmm28, %zmm19, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512DQBW-FAST-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512DQBW-FAST-NEXT:    vzeroupper
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512DQBW-ONLY-LABEL: load_i16_stride8_vf64:
+; AVX512DQBW-ONLY:       # %bb.0:
+; AVX512DQBW-ONLY-NEXT:    subq $1096, %rsp # imm = 0x448
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 576(%rdi), %zmm28
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512DQBW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm20, %zmm0, %zmm2
+; AVX512DQBW-ONLY-NEXT:    movb $-64, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,32,40,48,56,0,8,16,24,32,40,48,56]
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm9, %zmm1, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm17, %zmm0, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm21, %zmm0, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm6, %zmm8, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm22, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm20, %zmm4, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512DQBW-ONLY-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm9, %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm17, %zmm4, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512DQBW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm22, %zmm0, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512DQBW-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm22, %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512DQBW-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm22, %zmm10, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512DQBW-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm22, %zmm15, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
+; AVX512DQBW-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm22, %zmm13, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512DQBW-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm22, %zmm1, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm20, %zmm0, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm20, %zmm3, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm20, %zmm10, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm20, %zmm15, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm20, %zmm13, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm20, %zmm1, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm17, %zmm0, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm17, %zmm3, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm17, %zmm10, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm17, %zmm15, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm17, %zmm13, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm17, %zmm1, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm21, %zmm4, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm21, %zmm0, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm21, %zmm3, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm21, %zmm10, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm21, %zmm15, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm21, %zmm13, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm21, %zmm1, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm4, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm0, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm6, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm10, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm15, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm13, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm1, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm0, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm28, %zmm1, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,33,41,49,57,1,9,17,25,33,41,49,57]
+; AVX512DQBW-ONLY-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,34,42,50,58,2,10,18,26,34,42,50,58]
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm9, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,35,43,51,59,3,11,19,27,35,43,51,59]
+; AVX512DQBW-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm3, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,36,44,52,60,4,12,20,28,36,44,52,60]
+; AVX512DQBW-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm6, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,37,45,53,61,5,13,21,29,37,45,53,61]
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm9, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,38,46,54,62,6,14,22,30,38,46,54,62]
+; AVX512DQBW-ONLY-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm12, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,39,47,55,63,7,15,23,31,39,47,55,63]
+; AVX512DQBW-ONLY-NEXT:    # ymm19 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm5, %zmm19, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermi2w %zmm28, %zmm2, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2w %zmm28, %zmm19, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 64(%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, (%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, (%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%r8)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, (%r8)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, 64(%r9)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512DQBW-ONLY-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512DQBW-ONLY-NEXT:    vzeroupper
+; AVX512DQBW-ONLY-NEXT:    retq
   %wide.vec = load <512 x i16>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <512 x i16> %wide.vec, <512 x i16> poison, <64 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56, i32 64, i32 72, i32 80, i32 88, i32 96, i32 104, i32 112, i32 120, i32 128, i32 136, i32 144, i32 152, i32 160, i32 168, i32 176, i32 184, i32 192, i32 200, i32 208, i32 216, i32 224, i32 232, i32 240, i32 248, i32 256, i32 264, i32 272, i32 280, i32 288, i32 296, i32 304, i32 312, i32 320, i32 328, i32 336, i32 344, i32 352, i32 360, i32 368, i32 376, i32 384, i32 392, i32 400, i32 408, i32 416, i32 424, i32 432, i32 440, i32 448, i32 456, i32 464, i32 472, i32 480, i32 488, i32 496, i32 504>
   %strided.vec1 = shufflevector <512 x i16> %wide.vec, <512 x i16> poison, <64 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57, i32 65, i32 73, i32 81, i32 89, i32 97, i32 105, i32 113, i32 121, i32 129, i32 137, i32 145, i32 153, i32 161, i32 169, i32 177, i32 185, i32 193, i32 201, i32 209, i32 217, i32 225, i32 233, i32 241, i32 249, i32 257, i32 265, i32 273, i32 281, i32 289, i32 297, i32 305, i32 313, i32 321, i32 329, i32 337, i32 345, i32 353, i32 361, i32 369, i32 377, i32 385, i32 393, i32 401, i32 409, i32 417, i32 425, i32 433, i32 441, i32 449, i32 457, i32 465, i32 473, i32 481, i32 489, i32 497, i32 505>
@@ -9896,10 +9334,16 @@ define void @load_i16_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

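The hunk above shows the payoff of the shared prefix: the AVX512DQBW-SLOW and AVX512DQBW-FAST bodies for load_i16_stride8_vf64 were identical, so the autogenerated assertions collapse into a single AVX512DQBW-ONLY block, and the now-unused per-run prefixes drop to `{{.*}}` placeholders at the end of the file. As a minimal sketch of the mechanism (a hypothetical standalone test, not part of this patch), both RUN lines below pass the same common prefix to FileCheck, so identical codegen needs only one set of checks:

; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512DQBW-ONLY,AVX512DQBW-SLOW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQBW-ONLY,AVX512DQBW-FAST

; Identity function: the vector argument already lives in %xmm0, so both
; feature combinations compile to a bare return and can share one check body.
define <8 x i16> @identity(<8 x i16> %v) {
; AVX512DQBW-ONLY-LABEL: identity:
; AVX512DQBW-ONLY:       # %bb.0:
; AVX512DQBW-ONLY-NEXT:    retq
  ret <8 x i16> %v
}

Should the two runs ever diverge, regenerating the assertions would emit the differing checks under the more specific AVX512DQBW-SLOW/AVX512DQBW-FAST prefixes, which is exactly the split being merged away across the files below.
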
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
index 85b318556a5b907..7bb842df05dd4d3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-2.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -740,15 +740,19 @@ define void @load_i32_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
index e812fb903a808e1..e398c68fa81ca1a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-3.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -3022,15 +3022,19 @@ define void @load_i32_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
index 7b4df6a350e3374..a08234ffb482f5a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -3439,15 +3439,19 @@ define void @load_i32_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
index 70c84aa6d2c998b..0998868da1f52bf 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -5252,14 +5252,18 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

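For readers skimming the diffs: all of these files exercise the same IR idiom, a single wide load deinterleaved into its component streams by strided shufflevectors. A minimal sketch of the stride-2 shape, written purely for illustration (the function name and vector widths below are invented, but the pattern mirrors the IR visible in the hunks):

define void @load_i32_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) {
  ; One wide load covers both interleaved streams.
  %wide.vec = load <8 x i32>, ptr %in.vec, align 64
  ; Even lanes form stream 0, odd lanes form stream 1.
  %strided.vec0 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %strided.vec1 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  store <4 x i32> %strided.vec0, ptr %out.vec0, align 64
  store <4 x i32> %strided.vec1, ptr %out.vec1, align 64
  ret void
}
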
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index 8c709508720a44b..5d3f85d6ab6924e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -165,39 +165,6 @@ define void @load_i32_stride6_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-FAST-NEXT:    vmovlps %xmm3, (%rax)
 ; AVX512-FAST-NEXT:    vzeroupper
 ; AVX512-FAST-NEXT:    retq
-;
-; AVX512BW-SLOW-LABEL: load_i32_stride6_vf2:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vmovaps 16(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vextractps $2, %xmm1, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrd $1, %r10d, %xmm0, %xmm3
-; AVX512BW-SLOW-NEXT:    vextractps $3, %xmm1, %r10d
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512BW-SLOW-NEXT:    vpinsrd $1, %r10d, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vmovd %xmm2, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrd $1, %r10d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vmovddup {{.*#+}} xmm2 = [4,2,4,2]
-; AVX512BW-SLOW-NEXT:    # xmm2 = mem[0,0]
-; AVX512BW-SLOW-NEXT:    vmovaps 32(%rdi), %ymm5
-; AVX512BW-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpermps %ymm5, %ymm2, %ymm2
-; AVX512BW-SLOW-NEXT:    vmovddup {{.*#+}} xmm6 = [5,3,5,3]
-; AVX512BW-SLOW-NEXT:    # xmm6 = mem[0,0]
-; AVX512BW-SLOW-NEXT:    vpermps %ymm5, %ymm6, %ymm5
-; AVX512BW-SLOW-NEXT:    vmovq %xmm3, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovq %xmm1, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovq %xmm4, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovq %xmm0, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovlps %xmm2, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovlps %xmm5, (%rax)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
   %wide.vec = load <12 x i32>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <2 x i32> <i32 0, i32 6>
   %strided.vec1 = shufflevector <12 x i32> %wide.vec, <12 x i32> poison, <2 x i32> <i32 1, i32 7>
@@ -10299,13 +10266,18 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1: {{.*}}
 ; AVX2: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
index 7c98c94de19be04..705fda41f71e17e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -12632,14 +12632,18 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1: {{.*}}
 ; AVX2: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

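A note on what the new *-ONLY prefixes buy, sketched with an invented asm body (the prefix names are the ones from the RUN lines above): update_llc_test_checks.py emits one shared check block under the most specific prefix that is common to a set of RUN lines with identical output. Since AVX512F-ONLY is now shared by both the plain and the fast-shuffle avx512vl runs, codegen that depends only on the feature set, and not on the shuffle-tuning flags, collapses into a single block like the one below instead of duplicate AVX512F-ONLY-SLOW/AVX512F-ONLY-FAST copies, which is why whole per-tuning bodies disappear in these hunks:

; AVX512F-ONLY-LABEL: example:
; AVX512F-ONLY:       # %bb.0:
; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, (%rsi)
; AVX512F-ONLY-NEXT:    vzeroupper
; AVX512F-ONLY-NEXT:    retq
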
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
index a47102b6f7c0ae5..794356f8bc279c7 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -178,40 +178,6 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-FAST-NEXT:    vmovq %xmm1, (%rax)
 ; AVX512-FAST-NEXT:    vzeroupper
 ; AVX512-FAST-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i32_stride8_vf2:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-FAST-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [1,5,1,5]
-; AVX512BW-FAST-NEXT:    vpermi2d %xmm1, %xmm0, %xmm3
-; AVX512BW-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512BW-FAST-NEXT:    vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,5,5,13,5,5]
-; AVX512BW-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512BW-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512BW-FAST-NEXT:    vmovq %xmm2, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovq %xmm3, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovq %xmm0, (%rcx)
-; AVX512BW-FAST-NEXT:    vpextrq $1, %xmm0, (%r8)
-; AVX512BW-FAST-NEXT:    vmovq %xmm5, (%r9)
-; AVX512BW-FAST-NEXT:    vmovq %xmm6, (%r11)
-; AVX512BW-FAST-NEXT:    vmovq %xmm4, (%r10)
-; AVX512BW-FAST-NEXT:    vmovq %xmm1, (%rax)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
   %wide.vec = load <16 x i32>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <2 x i32> <i32 0, i32 8>
   %strided.vec1 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <2 x i32> <i32 1, i32 9>
@@ -3721,2253 +3687,1129 @@ define void @load_i32_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-SLOW-LABEL: load_i32_stride8_vf32:
-; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    movb $-64, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512F-ONLY-SLOW-NEXT:    retq
-;
-; AVX512F-ONLY-FAST-LABEL: load_i32_stride8_vf32:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    movb $-64, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512F-ONLY-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512F-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512F-ONLY-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512F-ONLY-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512F-ONLY-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512F-ONLY-FAST-NEXT:    vzeroupper
-; AVX512F-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQ-SLOW-LABEL: load_i32_stride8_vf32:
-; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512DQ-SLOW-NEXT:    movb $-64, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512DQ-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512DQ-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512DQ-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512DQ-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512DQ-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512DQ-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512DQ-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512DQ-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512DQ-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512DQ-SLOW-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512DQ-SLOW-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512DQ-SLOW-NEXT:    vzeroupper
-; AVX512DQ-SLOW-NEXT:    retq
-;
-; AVX512DQ-FAST-LABEL: load_i32_stride8_vf32:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512DQ-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    movb $-64, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512DQ-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512DQ-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512DQ-FAST-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512DQ-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512DQ-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512DQ-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512DQ-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512DQ-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512DQ-FAST-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512DQ-FAST-NEXT:    vzeroupper
-; AVX512DQ-FAST-NEXT:    retq
-;
-; AVX512BW-ONLY-SLOW-LABEL: load_i32_stride8_vf32:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    movb $-64, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512BW-ONLY-SLOW-NEXT:    retq
+; AVX512F-ONLY-LABEL: load_i32_stride8_vf32:
+; AVX512F-ONLY:       # %bb.0:
+; AVX512F-ONLY-NEXT:    subq $1096, %rsp # imm = 0x448
+; AVX512F-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 576(%rdi), %zmm28
+; AVX512F-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512F-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm25
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512F-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
+; AVX512F-ONLY-NEXT:    movb $-64, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
+; AVX512F-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
+; AVX512F-ONLY-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512F-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512F-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512F-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512F-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512F-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512F-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm24
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm27
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm31
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm19
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm16
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm17
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm11
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm21
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
+; AVX512F-ONLY-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
+; AVX512F-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
+; AVX512F-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
+; AVX512F-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
+; AVX512F-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
+; AVX512F-ONLY-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
+; AVX512F-ONLY-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
+; AVX512F-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-ONLY-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
+; AVX512F-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
+; AVX512F-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
+; AVX512F-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
+; AVX512F-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
+; AVX512F-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 64(%rsi)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, (%rsi)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, (%rdx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%r8)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, (%r8)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, 64(%r9)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512F-ONLY-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512F-ONLY-NEXT:    vzeroupper
+; AVX512F-ONLY-NEXT:    retq
 ;
-; AVX512BW-ONLY-FAST-LABEL: load_i32_stride8_vf32:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    movb $-64, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
-; AVX512BW-ONLY-FAST-NEXT:    retq
+; AVX512DQ-ONLY-LABEL: load_i32_stride8_vf32:
+; AVX512DQ-ONLY:       # %bb.0:
+; AVX512DQ-ONLY-NEXT:    subq $1096, %rsp # imm = 0x448
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 576(%rdi), %zmm28
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm25
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512DQ-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
+; AVX512DQ-ONLY-NEXT:    movb $-64, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
+; AVX512DQ-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
+; AVX512DQ-ONLY-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512DQ-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512DQ-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512DQ-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512DQ-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512DQ-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512DQ-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm27
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm31
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm19
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm17
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm11
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm21
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
+; AVX512DQ-ONLY-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
+; AVX512DQ-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
+; AVX512DQ-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
+; AVX512DQ-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
+; AVX512DQ-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
+; AVX512DQ-ONLY-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
+; AVX512DQ-ONLY-NEXT:    # ymm19 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 64(%rsi)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, (%rsi)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, (%rdx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%r8)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, (%r8)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, 64(%r9)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512DQ-ONLY-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512DQ-ONLY-NEXT:    vzeroupper
+; AVX512DQ-ONLY-NEXT:    retq
 ;
-; AVX512DQBW-SLOW-LABEL: load_i32_stride8_vf32:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    movb $-64, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512DQBW-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512DQBW-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512DQBW-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512DQBW-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512DQBW-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512DQBW-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512DQBW-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512DQBW-SLOW-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512DQBW-SLOW-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512DQBW-SLOW-NEXT:    vzeroupper
-; AVX512DQBW-SLOW-NEXT:    retq
+; AVX512BW-ONLY-LABEL: load_i32_stride8_vf32:
+; AVX512BW-ONLY:       # %bb.0:
+; AVX512BW-ONLY-NEXT:    subq $1096, %rsp # imm = 0x448
+; AVX512BW-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 576(%rdi), %zmm28
+; AVX512BW-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512BW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
+; AVX512BW-ONLY-NEXT:    movb $-64, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
+; AVX512BW-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
+; AVX512BW-ONLY-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512BW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512BW-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512BW-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512BW-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512BW-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512BW-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm31
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm19
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm21
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
+; AVX512BW-ONLY-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
+; AVX512BW-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
+; AVX512BW-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
+; AVX512BW-ONLY-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
+; AVX512BW-ONLY-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
+; AVX512BW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 64(%rsi)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, (%rsi)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, (%rdx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%r8)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, (%r8)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, 64(%r9)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512BW-ONLY-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512BW-ONLY-NEXT:    vzeroupper
+; AVX512BW-ONLY-NEXT:    retq
 ;
-; AVX512DQBW-FAST-LABEL: load_i32_stride8_vf32:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    subq $1096, %rsp # imm = 0x448
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
-; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    movb $-64, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
-; AVX512DQBW-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
-; AVX512DQBW-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
-; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
-; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
-; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
-; AVX512DQBW-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
-; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
-; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
-; AVX512DQBW-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
-; AVX512DQBW-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
-; AVX512DQBW-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
-; AVX512DQBW-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
-; AVX512DQBW-FAST-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 64(%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, (%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512DQBW-FAST-NEXT:    addq $1096, %rsp # imm = 0x448
-; AVX512DQBW-FAST-NEXT:    vzeroupper
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512DQBW-ONLY-LABEL: load_i32_stride8_vf32:
+; AVX512DQBW-ONLY:       # %bb.0:
+; AVX512DQBW-ONLY-NEXT:    subq $1096, %rsp # imm = 0x448
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 576(%rdi), %zmm28
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24]
+; AVX512DQBW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm20, %zmm0, %zmm2
+; AVX512DQBW-ONLY-NEXT:    movb $-64, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm2 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,16,24,0,8,16,24]
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm9, %zmm1, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm17, %zmm0, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm21, %zmm0, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm6, %zmm8, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [1,9,17,25,1,9,17,25,1,9,17,25,1,9,17,25]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm22, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm20, %zmm4, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [1,9,17,25,1,9,17,25]
+; AVX512DQBW-ONLY-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm9, %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm17, %zmm4, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm1, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [2,10,18,26,2,10,18,26,2,10,18,26,2,10,18,26]
+; AVX512DQBW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm22, %zmm0, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [3,11,19,27,3,11,19,27,3,11,19,27,3,11,19,27]
+; AVX512DQBW-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm22, %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [4,12,20,28,4,12,20,28,4,12,20,28,4,12,20,28]
+; AVX512DQBW-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm22, %zmm10, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [5,13,21,29,5,13,21,29,5,13,21,29,5,13,21,29]
+; AVX512DQBW-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm22, %zmm15, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,22,30,6,14,22,30,6,14,22,30,6,14,22,30]
+; AVX512DQBW-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm22, %zmm13, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,23,31,7,15,23,31,7,15,23,31,7,15,23,31]
+; AVX512DQBW-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm22, %zmm1, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm20, %zmm0, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm20, %zmm3, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm20, %zmm10, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm20, %zmm15, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm20, %zmm13, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm20, %zmm1, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm17, %zmm0, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm17, %zmm3, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm17, %zmm10, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm17, %zmm15, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm17, %zmm13, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm17, %zmm1, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm21, %zmm4, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm21, %zmm0, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm21, %zmm3, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm21, %zmm10, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm21, %zmm15, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm21, %zmm13, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm21, %zmm1, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm4, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm0, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm6, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm10, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm15, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm13, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm1, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm0, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm28, %zmm1, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm9 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,9,17,25,1,9,17,25]
+; AVX512DQBW-ONLY-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [2,10,18,26,2,10,18,26]
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm9, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm24, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm11 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm11, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [3,11,19,27,3,11,19,27]
+; AVX512DQBW-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm3, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm27, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm14 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,20,28,4,12,20,28]
+; AVX512DQBW-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm6, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [5,13,21,29,5,13,21,29]
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm9, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [6,14,22,30,6,14,22,30]
+; AVX512DQBW-ONLY-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm12, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,23,31,7,15,23,31]
+; AVX512DQBW-ONLY-NEXT:    # ymm19 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm5, %zmm19, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermi2d %zmm28, %zmm2, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2d %zmm28, %zmm19, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, (%rsp), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm8 = mem[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm31, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm21 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm21, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm10 = mem[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm30, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm7 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm7, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm22 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm9 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm9, %zmm22, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm15 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm15, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm26 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm26, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm29 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpblendd $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm11 = mem[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm29, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 64(%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, (%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, 64(%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, (%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, 64(%rcx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%r8)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, (%r8)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, 64(%r9)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512DQBW-ONLY-NEXT:    addq $1096, %rsp # imm = 0x448
+; AVX512DQBW-ONLY-NEXT:    vzeroupper
+; AVX512DQBW-ONLY-NEXT:    retq
   %wide.vec = load <256 x i32>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <256 x i32> %wide.vec, <256 x i32> poison, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56, i32 64, i32 72, i32 80, i32 88, i32 96, i32 104, i32 112, i32 120, i32 128, i32 136, i32 144, i32 152, i32 160, i32 168, i32 176, i32 184, i32 192, i32 200, i32 208, i32 216, i32 224, i32 232, i32 240, i32 248>
   %strided.vec1 = shufflevector <256 x i32> %wide.vec, <256 x i32> poison, <32 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57, i32 65, i32 73, i32 81, i32 89, i32 97, i32 105, i32 113, i32 121, i32 129, i32 137, i32 145, i32 153, i32 161, i32 169, i32 177, i32 185, i32 193, i32 201, i32 209, i32 217, i32 225, i32 233, i32 241, i32 249>
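
The IR tail above is the pattern under test: a single wide <256 x i32> load
deinterleaved by stride-8 shufflevector masks into the per-lane results. A
minimal stride-2 sketch of the same shape (not taken from the patch; the
function name is hypothetical) that these files scale up:

    define void @load_i32_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) {
      ; one wide load covers both interleaved lanes
      %wide.vec = load <8 x i32>, ptr %in.vec, align 64
      ; even elements form lane 0, odd elements form lane 1
      %strided.vec0 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
      %strided.vec1 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
      store <4 x i32> %strided.vec0, ptr %out.vec0, align 64
      store <4 x i32> %strided.vec1, ptr %out.vec1, align 64
      ret void
    }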
@@ -11096,8 +9938,17 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST: {{.*}}
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
+; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
+; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY-FAST: {{.*}}
+; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
index eb2a1cc227f43ab..972458ad55fe1dc 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-2.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
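
The point of the added *-ONLY prefixes: the SLOW and FAST runs for a given
feature set now share a prefix (e.g. FALLBACK9 and FALLBACK10 above both pass
AVX512BW-ONLY), so when the two runs produce identical code the update script
can emit one shared check block instead of duplicating it under
AVX512BW-ONLY-SLOW and AVX512BW-ONLY-FAST. A sketch of the merged form (label
and instructions are illustrative, not from this patch):

    ; AVX512BW-ONLY-LABEL: load_example:
    ; AVX512BW-ONLY:       # %bb.0:
    ; AVX512BW-ONLY-NEXT:    vmovaps (%rdi), %zmm0
    ; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, (%rsi)
    ; AVX512BW-ONLY-NEXT:    vzeroupper
    ; AVX512BW-ONLY-NEXT:    retq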
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -1429,15 +1429,19 @@ define void @load_i64_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) no
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}
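
On the trailing prefix lists (here and in each file below): these appear to be
placeholders that the update script emits for prefixes which ended up with no
dedicated check lines, each matching trivially so FileCheck does not complain
about a declared-but-unused prefix. Annotated shape of one such line (the
trailing note is mine, not in the tests):

    ; AVX512BW-ONLY: {{.*}}   <- regex matches anything; prefix is declared but asserts nothing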

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll
index b83571698ea2e65..5ec2453c3564f24 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -2546,15 +2546,19 @@ define void @load_i64_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
index e2bf6ef15fdea78..faed36dfe51be89 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -4146,15 +4146,19 @@ define void @load_i64_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
index 08763079d0c68d3..4a2c38168bdf420 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -6440,14 +6440,18 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
index 6e00c62f404e5a7..288b7219260c9a9 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-6.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -8648,14 +8648,18 @@ define void @load_i64_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW: {{.*}}
 ; AVX512: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
index 7c777fb5a94b1ab..831766ececbaca1 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -3678,3569 +3678,1787 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-SLOW-LABEL: load_i64_stride7_vf32:
-; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $2216, %rsp # imm = 0x8A8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1600(%rdi), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1216(%rdi), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1088(%rdi), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps 1024(%rdi), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 576(%rdi), %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm13, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm20, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm20, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [9,0,7,0,9,0,7,0]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm30, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm30, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1536(%rdi), %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm21, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    movb $24, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm11[4,5,4,5],zmm10[4,5,4,5]
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [14,0,0,7,14,0,0,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [7,0,9,0,7,0,9,0]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm16, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,11,4,11]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm20, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm12[4,5,4,5],zmm24[4,5,4,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm10, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm16, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm20, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm21, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1408(%rdi), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm8, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm3[4,5,4,5],zmm15[4,5,4,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm15, %zmm3, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm15, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm20, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm2[4,5,4,5],zmm9[4,5,4,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm20, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 912(%rdi), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm2, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm26, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm10, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 1024(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm8, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 1088(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm0 = ymm2[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,12]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 960(%rdi), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm15, %xmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm8, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm7, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    movb $-32, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm17 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm18 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm29 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm26 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm19, %zmm24, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm15, %zmm27, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm9, %zmm25, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm15, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 1408(%rdi), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm7, %zmm16, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpalignr $8, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 1536(%rdi), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinsertf64x4 $0, %ymm15, %zmm16, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm16, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm16, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 128(%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, (%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 128(%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 192(%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, (%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, 64(%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 128(%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 192(%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 128(%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 192(%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%r9)
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm15, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $2216, %rsp # imm = 0x8A8
-; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512F-ONLY-SLOW-NEXT:    retq
+; AVX512F-ONLY-LABEL: load_i64_stride7_vf32:
+; AVX512F-ONLY:       # %bb.0:
+; AVX512F-ONLY-NEXT:    subq $2216, %rsp # imm = 0x8A8
+; AVX512F-ONLY-NEXT:    vmovdqa64 1664(%rdi), %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqa64 1600(%rdi), %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 1216(%rdi), %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 1152(%rdi), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 1088(%rdi), %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovaps 1024(%rdi), %zmm0
+; AVX512F-ONLY-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 576(%rdi), %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm26
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
+; AVX512F-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
+; AVX512F-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
+; AVX512F-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm13, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm13, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
+; AVX512F-ONLY-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm20, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm23
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm20, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm25
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm7, %zmm25
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
+; AVX512F-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm28, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm16
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [9,0,7,0,9,0,7,0]
+; AVX512F-ONLY-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm30, %zmm14
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm30, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm4, %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 1536(%rdi), %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 1472(%rdi), %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm20, %zmm29
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm31
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm28, %zmm22
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm28
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm5, %zmm21, %zmm30
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 1280(%rdi), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 1728(%rdi), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm29
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm25
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm30
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
+; AVX512F-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    movb $24, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm11[4,5,4,5],zmm10[4,5,4,5]
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [14,0,0,7,14,0,0,7]
+; AVX512F-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm26
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm10, %zmm26
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [7,0,9,0,7,0,9,0]
+; AVX512F-ONLY-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm27
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm16, %zmm27
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,11,4,11]
+; AVX512F-ONLY-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm20, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm17
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm21, %zmm17
+; AVX512F-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm12[4,5,4,5],zmm24[4,5,4,5]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm23
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm10, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm25
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm16, %zmm25
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm20, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm21, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqa64 1408(%rdi), %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 1344(%rdi), %zmm8
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm4, %zmm8, %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm3[4,5,4,5],zmm15[4,5,4,5]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm15, %zmm3, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm24
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm16, %zmm24
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm3, %zmm15, %zmm16
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm15, %zmm20, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm2[4,5,4,5],zmm9[4,5,4,5]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm20, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa 912(%rdi), %xmm2
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; AVX512F-ONLY-NEXT:    vinserti32x4 $0, %xmm2, %zmm0, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa 464(%rdi), %xmm0
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512F-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm26, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm0
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512F-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqa 1360(%rdi), %xmm0
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512F-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm10, %zmm20
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 1024(%rdi), %ymm2
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm13
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm13
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 576(%rdi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 1472(%rdi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm4, %zmm8, %zmm10
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
+; AVX512F-ONLY-NEXT:    vmovdqa 1088(%rdi), %ymm0
+; AVX512F-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = ymm2[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %ymm0, %ymm23
+; AVX512F-ONLY-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm9
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,12]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,13]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm7, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm0, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm2, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm8, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa 960(%rdi), %ymm15
+; AVX512F-ONLY-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512F-ONLY-NEXT:    vextracti32x4 $1, %ymm15, %xmm19
+; AVX512F-ONLY-NEXT:    vmovdqa 512(%rdi), %ymm15
+; AVX512F-ONLY-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512F-ONLY-NEXT:    vextracti128 $1, %ymm15, %xmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm9
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm2, %zmm9
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm4, %zmm8, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm7, %zmm11
+; AVX512F-ONLY-NEXT:    movb $-32, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k2
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm17 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm18 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm29 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm26 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k2}
+; AVX512F-ONLY-NEXT:    vinserti32x4 $0, %xmm19, %zmm24, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k2}
+; AVX512F-ONLY-NEXT:    vinserti32x4 $0, %xmm15, %zmm27, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm9
+; AVX512F-ONLY-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
+; AVX512F-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm9
+; AVX512F-ONLY-NEXT:    vinserti32x4 $0, %xmm9, %zmm25, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm7, %zmm15, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqa 640(%rdi), %ymm15
+; AVX512F-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512F-ONLY-NEXT:    vmovdqa 1408(%rdi), %ymm7
+; AVX512F-ONLY-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
+; AVX512F-ONLY-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512F-ONLY-NEXT:    vinserti32x4 $0, %xmm7, %zmm16, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm15
+; AVX512F-ONLY-NEXT:    vpalignr $8, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa 1536(%rdi), %ymm15
+; AVX512F-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinsertf64x4 $0, %ymm15, %zmm16, %zmm15
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm16, %zmm8
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm16, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, 128(%rsi)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, (%rsi)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, (%rdx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, 128(%rdx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm20, 192(%rcx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, (%rcx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, 64(%rcx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm22, 128(%rcx)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, 192(%r8)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, (%r8)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, 64(%r8)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, 128(%r8)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm28, 192(%r9)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, (%r9)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 64(%r9)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 128(%r9)
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, 64(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, 128(%rax)
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, 128(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, (%rax)
+; AVX512F-ONLY-NEXT:    vmovaps %zmm15, 64(%rax)
+; AVX512F-ONLY-NEXT:    addq $2216, %rsp # imm = 0x8A8
+; AVX512F-ONLY-NEXT:    vzeroupper
+; AVX512F-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-FAST-LABEL: load_i64_stride7_vf32:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $2216, %rsp # imm = 0x8A8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1600(%rdi), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1216(%rdi), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1088(%rdi), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovaps 1024(%rdi), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512F-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm13, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
-; AVX512F-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm20, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512F-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [9,0,7,0,9,0,7,0]
-; AVX512F-ONLY-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm30, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm30, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm4, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1536(%rdi), %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm20, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm28, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm21, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    movb $24, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm11[4,5,4,5],zmm10[4,5,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [14,0,0,7,14,0,0,7]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [7,0,9,0,7,0,9,0]
-; AVX512F-ONLY-FAST-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm16, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,11,4,11]
-; AVX512F-ONLY-FAST-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm12[4,5,4,5],zmm24[4,5,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm10, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm16, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm20, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm21, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1408(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm8, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm3[4,5,4,5],zmm15[4,5,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm15, %zmm3, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm16, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm15, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm20, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm2[4,5,4,5],zmm9[4,5,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 912(%rdi), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm2, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm26, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm10, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 1024(%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm8, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 1088(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm0 = ymm2[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,12]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,13]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm8, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 960(%rdi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 512(%rdi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm2, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm8, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm7, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    movb $-32, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm17 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm18 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm29 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm19, %zmm24, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm15, %zmm27, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm9, %zmm25, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm15, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 640(%rdi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 1408(%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm7, %zmm16, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpalignr $8, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 1536(%rdi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinsertf64x4 $0, %ymm15, %zmm16, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm16, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm16, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 128(%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 192(%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 64(%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 128(%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 192(%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 128(%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 192(%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, (%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%r9)
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm15, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $2216, %rsp # imm = 0x8A8
-; AVX512F-ONLY-FAST-NEXT:    vzeroupper
-; AVX512F-ONLY-FAST-NEXT:    retq
+; AVX512DQ-ONLY-LABEL: load_i64_stride7_vf32:
+; AVX512DQ-ONLY:       # %bb.0:
+; AVX512DQ-ONLY-NEXT:    subq $2216, %rsp # imm = 0x8A8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1664(%rdi), %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1600(%rdi), %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1216(%rdi), %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1152(%rdi), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1088(%rdi), %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovaps 1024(%rdi), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 576(%rdi), %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm26
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
+; AVX512DQ-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
+; AVX512DQ-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
+; AVX512DQ-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm13, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm13, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
+; AVX512DQ-ONLY-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm20, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm23
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm20, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm17
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm7, %zmm25
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
+; AVX512DQ-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm28, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [9,0,7,0,9,0,7,0]
+; AVX512DQ-ONLY-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm30, %zmm14
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm30, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm4, %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1536(%rdi), %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1472(%rdi), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm20, %zmm29
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm31
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm28, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm3, %zmm28
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm5, %zmm21, %zmm30
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm1
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1280(%rdi), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1728(%rdi), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm29
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm25
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm30
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
+; AVX512DQ-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    movb $24, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm11[4,5,4,5],zmm10[4,5,4,5]
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [14,0,0,7,14,0,0,7]
+; AVX512DQ-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm10, %zmm26
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [7,0,9,0,7,0,9,0]
+; AVX512DQ-ONLY-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm27
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm16, %zmm27
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,11,4,11]
+; AVX512DQ-ONLY-NEXT:    # ymm20 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm20, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm17
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm21, %zmm17
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm12[4,5,4,5],zmm24[4,5,4,5]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm23
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm10, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm16, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm20, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm21, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1408(%rdi), %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 1344(%rdi), %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm4, %zmm8, %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm3[4,5,4,5],zmm15[4,5,4,5]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm15, %zmm3, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm16, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm3, %zmm15, %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm15, %zmm20, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm2[4,5,4,5],zmm9[4,5,4,5]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm20, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa 912(%rdi), %xmm2
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $0, %xmm2, %zmm0, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa 464(%rdi), %xmm0
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm26, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm0
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqa 1360(%rdi), %xmm0
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm10, %zmm20
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 1024(%rdi), %ymm2
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm13
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm13
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 576(%rdi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 1472(%rdi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm4, %zmm8, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
+; AVX512DQ-ONLY-NEXT:    vmovdqa 1088(%rdi), %ymm0
+; AVX512DQ-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = ymm2[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %ymm0, %ymm23
+; AVX512DQ-ONLY-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,12]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,13]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm7, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm0, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm2, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm8, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 960(%rdi), %ymm15
+; AVX512DQ-ONLY-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512DQ-ONLY-NEXT:    vextracti32x4 $1, %ymm15, %xmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqa 512(%rdi), %ymm15
+; AVX512DQ-ONLY-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512DQ-ONLY-NEXT:    vextracti128 $1, %ymm15, %xmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm2, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm4, %zmm8, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm7, %zmm11
+; AVX512DQ-ONLY-NEXT:    movb $-32, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm17 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm18 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm29 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm26 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k2}
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $0, %xmm19, %zmm24, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k2}
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $0, %xmm15, %zmm27, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm9
+; AVX512DQ-ONLY-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
+; AVX512DQ-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm9
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $0, %xmm9, %zmm25, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm7, %zmm15, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqa 640(%rdi), %ymm15
+; AVX512DQ-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 1408(%rdi), %ymm7
+; AVX512DQ-ONLY-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
+; AVX512DQ-ONLY-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $0, %xmm7, %zmm16, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm15
+; AVX512DQ-ONLY-NEXT:    vpalignr $8, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa 1536(%rdi), %ymm15
+; AVX512DQ-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinsertf64x4 $0, %ymm15, %zmm16, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm16, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm16, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, 128(%rsi)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, (%rsi)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, (%rdx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, 128(%rdx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm20, 192(%rcx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, (%rcx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, 64(%rcx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm22, 128(%rcx)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, 192(%r8)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, (%r8)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, 64(%r8)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, 128(%r8)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm28, 192(%r9)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, (%r9)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 64(%r9)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 128(%r9)
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, 128(%rax)
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, 128(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, (%rax)
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm15, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    addq $2216, %rsp # imm = 0x8A8
+; AVX512DQ-ONLY-NEXT:    vzeroupper
+; AVX512DQ-ONLY-NEXT:    retq
 ;
-; AVX512DQ-SLOW-LABEL: load_i64_stride7_vf32:
-; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $2216, %rsp # imm = 0x8A8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1600(%rdi), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1216(%rdi), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1088(%rdi), %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovaps 1024(%rdi), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 576(%rdi), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512DQ-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512DQ-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512DQ-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm13, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
-; AVX512DQ-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm20, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm20, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm25
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512DQ-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [9,0,7,0,9,0,7,0]
-; AVX512DQ-SLOW-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm30, %zmm14
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm30, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1536(%rdi), %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm20, %zmm29
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm3, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm5, %zmm21, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm29
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm30
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512DQ-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    movb $24, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm11[4,5,4,5],zmm10[4,5,4,5]
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [14,0,0,7,14,0,0,7]
-; AVX512DQ-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm26
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [7,0,9,0,7,0,9,0]
-; AVX512DQ-SLOW-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm16, %zmm27
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,11,4,11]
-; AVX512DQ-SLOW-NEXT:    # ymm20 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm20, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm17
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm12[4,5,4,5],zmm24[4,5,4,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm10, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm16, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm20, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm21, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1408(%rdi), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm8, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm3[4,5,4,5],zmm15[4,5,4,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm15, %zmm3, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm3, %zmm15, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm20, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm2[4,5,4,5],zmm9[4,5,4,5]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm20, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 912(%rdi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $0, %xmm2, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm26, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm10, %zmm20
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 1024(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm8, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa 1088(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpalignr {{.*#+}} ymm0 = ymm2[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,12]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,13]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm7, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm8, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 960(%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm15, %xmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm2, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm8, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm7, %zmm11
-; AVX512DQ-SLOW-NEXT:    movb $-32, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm17 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm18 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm29 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm26 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k2}
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $0, %xmm19, %zmm24, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k2}
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $0, %xmm15, %zmm27, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512DQ-SLOW-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $0, %xmm9, %zmm25, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm15, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 1408(%rdi), %ymm7
-; AVX512DQ-SLOW-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $0, %xmm7, %zmm16, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vpalignr $8, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa 1536(%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinsertf64x4 $0, %ymm15, %zmm16, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm16, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm16, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 128(%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 128(%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 192(%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, (%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, 64(%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, 128(%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 192(%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 128(%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, 192(%r9)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, (%r9)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 64(%r9)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 128(%r9)
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm15, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    addq $2216, %rsp # imm = 0x8A8
-; AVX512DQ-SLOW-NEXT:    vzeroupper
-; AVX512DQ-SLOW-NEXT:    retq
+; AVX512BW-ONLY-LABEL: load_i64_stride7_vf32:
+; AVX512BW-ONLY:       # %bb.0:
+; AVX512BW-ONLY-NEXT:    subq $2152, %rsp # imm = 0x868
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1664(%rdi), %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1600(%rdi), %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1216(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1152(%rdi), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1088(%rdi), %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1024(%rdi), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm11
+; AVX512BW-ONLY-NEXT:    vmovaps 576(%rdi), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
+; AVX512BW-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm31, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
+; AVX512BW-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
+; AVX512BW-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm13, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm13, %zmm18
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [12,5,12,5,12,5,12,5]
+; AVX512BW-ONLY-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm19, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm19, %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm7, %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm24
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
+; AVX512BW-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm28, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm28, %zmm25
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [9,0,7,0,9,0,7,0]
+; AVX512BW-ONLY-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm10, %zmm29, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm4, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm7, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm31, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1536(%rdi), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1472(%rdi), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm7, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm19, %zmm31
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm5, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm5, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm28, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm5, %zmm28
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm5, %zmm21, %zmm29
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1280(%rdi), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1728(%rdi), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm29
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
+; AVX512BW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    movb $24, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm14[4,5,4,5],zmm11[4,5,4,5]
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [14,0,0,7,14,0,0,7]
+; AVX512BW-ONLY-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm13
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm11, %zmm13
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [7,0,9,0,7,0,9,0]
+; AVX512BW-ONLY-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm17, %zmm26
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [4,11,4,11]
+; AVX512BW-ONLY-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm19, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm21, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[4,5,4,5],zmm22[4,5,4,5]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm11, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm30
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm17, %zmm30
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm19, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm21, %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1408(%rdi), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1344(%rdi), %zmm8
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm5, %zmm8, %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm6[4,5,4,5],zmm3[4,5,4,5]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm3, %zmm6, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm17, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm6, %zmm3, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm19, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm22[4,5,4,5]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm19, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa 912(%rdi), %xmm1
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa 464(%rdi), %xmm0
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm13, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm0
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa 1360(%rdi), %xmm0
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm11, %zmm22
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 1024(%rdi), %ymm1
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512BW-ONLY-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,11]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 576(%rdi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 1472(%rdi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,13]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm1, %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm6, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm10, %zmm0, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm8, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm6, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa 1088(%rdi), %ymm5
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} ymm5 = ymm19[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm19[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm12, %zmm28
+; AVX512BW-ONLY-NEXT:    movb $-32, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm24 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm19 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm22 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa 960(%rdi), %ymm6
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
+; AVX512BW-ONLY-NEXT:    vextracti128 $1, %ymm6, %xmm6
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $0, %xmm6, %zmm25, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm6 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa 512(%rdi), %ymm7
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
+; AVX512BW-ONLY-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $0, %xmm7, %zmm26, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm9
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
+; AVX512BW-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm9
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 1408(%rdi), %ymm20
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} ymm20 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
+; AVX512BW-ONLY-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $0, %xmm20, %zmm17, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 640(%rdi), %ymm20
+; AVX512BW-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20, %ymm5 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm5 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm13, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm13
+; AVX512BW-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa 1536(%rdi), %ymm13
+; AVX512BW-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm13 = ymm15[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm14, %zmm15, %zmm14
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm10 = ymm10[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, 128(%rsi)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, (%rsi)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, (%rdx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, (%rcx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, 128(%rcx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, 192(%r8)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, (%r8)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, 64(%r8)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, 128(%r8)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 192(%r9)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, (%r9)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 64(%r9)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 128(%r9)
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm28, 128(%rax)
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, 128(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, (%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, 64(%rax)
+; AVX512BW-ONLY-NEXT:    addq $2152, %rsp # imm = 0x868
+; AVX512BW-ONLY-NEXT:    vzeroupper
+; AVX512BW-ONLY-NEXT:    retq
 ;
-; AVX512DQ-FAST-LABEL: load_i64_stride7_vf32:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $2216, %rsp # imm = 0x8A8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1600(%rdi), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1216(%rdi), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1088(%rdi), %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovaps 1024(%rdi), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm26
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512DQ-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm13, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [12,5,12,5,12,5,12,5]
-; AVX512DQ-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm20, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm25
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512DQ-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm30 = [9,0,7,0,9,0,7,0]
-; AVX512DQ-FAST-NEXT:    # zmm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm30, %zmm14
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm30, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm4, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1536(%rdi), %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm20, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm31
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm28, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm3, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm5, %zmm21, %zmm30
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm1
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm30
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    movb $24, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm21, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm11[4,5,4,5],zmm10[4,5,4,5]
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [14,0,0,7,14,0,0,7]
-; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm10, %zmm26
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm16 = [7,0,9,0,7,0,9,0]
-; AVX512DQ-FAST-NEXT:    # zmm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm16, %zmm27
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,11,4,11]
-; AVX512DQ-FAST-NEXT:    # ymm20 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm20, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm21, %zmm17
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 {%k1} = zmm12[4,5,4,5],zmm24[4,5,4,5]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm10, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm16, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm20, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm21, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1408(%rdi), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm8, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm3[4,5,4,5],zmm15[4,5,4,5]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm15, %zmm3, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm16, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm15, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm20, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm12 {%k1} = zmm2[4,5,4,5],zmm9[4,5,4,5]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm20, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 912(%rdi), %xmm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $0, %xmm2, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm26, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm10, %zmm20
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 1024(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,11]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm13
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm22, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm10, %zmm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm8, %zmm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa 1088(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vpalignr {{.*#+}} ymm0 = ymm2[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [5,12]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [6,13]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm7, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm0, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm8, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 960(%rdi), %ymm15
-; AVX512DQ-FAST-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa 512(%rdi), %ymm15
-; AVX512DQ-FAST-NEXT:    vpalignr {{.*#+}} ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm2, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm8, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm7, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm7, %zmm11
-; AVX512DQ-FAST-NEXT:    movb $-32, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm17 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm18 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm12 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm21 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm3 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm22 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm29 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm20 {%k2}
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $0, %xmm19, %zmm24, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm4 {%k2}
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $0, %xmm15, %zmm27, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm6 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512DQ-FAST-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $0, %xmm9, %zmm25, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm15, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa 640(%rdi), %ymm15
-; AVX512DQ-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-FAST-NEXT:    vmovdqa 1408(%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $0, %xmm7, %zmm16, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm7 {%k2}
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm15
-; AVX512DQ-FAST-NEXT:    vpalignr $8, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa 1536(%rdi), %ymm15
-; AVX512DQ-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm15 = mem[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm30, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm15 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinsertf64x4 $0, %ymm15, %zmm16, %zmm15
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm14, %zmm16, %zmm14
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm16, %zmm8
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm16, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 128(%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 192(%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, (%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, 64(%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, 128(%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 192(%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 64(%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 128(%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, 192(%r9)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%r9)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 64(%r9)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 128(%r9)
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 128(%rax)
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm15, 64(%rax)
-; AVX512DQ-FAST-NEXT:    addq $2216, %rsp # imm = 0x8A8
-; AVX512DQ-FAST-NEXT:    vzeroupper
-; AVX512DQ-FAST-NEXT:    retq
-;
-; AVX512BW-ONLY-SLOW-LABEL: load_i64_stride7_vf32:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    subq $2152, %rsp # imm = 0x868
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1600(%rdi), %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1216(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1088(%rdi), %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps 576(%rdi), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm31, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [12,5,12,5,12,5,12,5]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm19, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [9,0,7,0,9,0,7,0]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm29, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm31, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1536(%rdi), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm19, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm21, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    movb $24, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm14[4,5,4,5],zmm11[4,5,4,5]
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [14,0,0,7,14,0,0,7]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [7,0,9,0,7,0,9,0]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm17, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [4,11,4,11]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm19, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[4,5,4,5],zmm22[4,5,4,5]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm17, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm21, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1408(%rdi), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm6[4,5,4,5],zmm3[4,5,4,5]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm3, %zmm6, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm3, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm19, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm22[4,5,4,5]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 912(%rdi), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm13, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm11, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 1024(%rdi), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,11]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,13]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm8, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 1088(%rdi), %ymm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm5 = ymm19[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm19[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm12, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    movb $-32, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm24 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm19 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm22 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 960(%rdi), %ymm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm6, %zmm25, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm6 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm7, %zmm26, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 1408(%rdi), %ymm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} ymm20 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $0, %xmm20, %zmm17, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 640(%rdi), %ymm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20, %ymm5 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm5 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm13, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 1536(%rdi), %ymm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm13 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm14, %zmm15, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm10 = ymm10[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 128(%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, (%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, (%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 128(%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 192(%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 128(%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 192(%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 128(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 128(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    addq $2152, %rsp # imm = 0x868
-; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512BW-ONLY-SLOW-NEXT:    retq
-;
-; AVX512BW-ONLY-FAST-LABEL: load_i64_stride7_vf32:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    subq $2152, %rsp # imm = 0x868
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1600(%rdi), %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1216(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1088(%rdi), %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps 576(%rdi), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm31, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [12,5,12,5,12,5,12,5]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm19, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm19, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm28, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm28, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [9,0,7,0,9,0,7,0]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm29, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm4, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm31, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1536(%rdi), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm19, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm5, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm5, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm5, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm21, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    movb $24, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm14[4,5,4,5],zmm11[4,5,4,5]
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [14,0,0,7,14,0,0,7]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm11, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [7,0,9,0,7,0,9,0]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm17, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [4,11,4,11]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm19, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[4,5,4,5],zmm22[4,5,4,5]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm21, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1408(%rdi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm8, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm6[4,5,4,5],zmm3[4,5,4,5]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm6, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm17, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm3, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm22[4,5,4,5]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 912(%rdi), %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm13, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm11, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 1024(%rdi), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,11]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,13]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm1, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm6, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm0, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm8, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm6, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 1088(%rdi), %ymm5
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm5 = ymm19[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm19[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm12, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    movb $-32, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm24 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm19 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm22 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 960(%rdi), %ymm6
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm6, %zmm25, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm6 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 512(%rdi), %ymm7
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm7, %zmm26, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 1408(%rdi), %ymm20
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} ymm20 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $0, %xmm20, %zmm17, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm20
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20, %ymm5 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm5 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm13, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm13
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 1536(%rdi), %ymm13
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm13 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm14, %zmm15, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm10 = ymm10[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 128(%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, (%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, (%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 128(%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 192(%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 64(%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 128(%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 192(%r9)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, (%r9)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%r9)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%r9)
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    addq $2152, %rsp # imm = 0x868
-; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-SLOW-LABEL: load_i64_stride7_vf32:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    subq $2152, %rsp # imm = 0x868
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1600(%rdi), %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1216(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1088(%rdi), %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 768(%rdi), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 704(%rdi), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 640(%rdi), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovaps 576(%rdi), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512DQBW-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm31, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512DQBW-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512DQBW-SLOW-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [12,5,12,5,12,5,12,5]
-; AVX512DQBW-SLOW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm19, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm7, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512DQBW-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [9,0,7,0,9,0,7,0]
-; AVX512DQBW-SLOW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm29, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm31, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1536(%rdi), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm19, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm7, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm28, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm5, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm21, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm4, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    movb $24, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm14[4,5,4,5],zmm11[4,5,4,5]
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [14,0,0,7,14,0,0,7]
-; AVX512DQBW-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [7,0,9,0,7,0,9,0]
-; AVX512DQBW-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm17, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [4,11,4,11]
-; AVX512DQBW-SLOW-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm19, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[4,5,4,5],zmm22[4,5,4,5]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm17, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 960(%rdi), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 896(%rdi), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm21, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1408(%rdi), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm6[4,5,4,5],zmm3[4,5,4,5]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm3, %zmm6, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm3, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm19, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm22[4,5,4,5]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 912(%rdi), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm13, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $0, %xmm0, %zmm11, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 1024(%rdi), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,11]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,13]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm8, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm6, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 1088(%rdi), %ymm5
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} ymm5 = ymm19[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm19[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm12, %zmm28
-; AVX512DQBW-SLOW-NEXT:    movb $-32, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm24 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm19 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm22 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 960(%rdi), %ymm6
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $0, %xmm6, %zmm25, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm6 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm7
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $0, %xmm7, %zmm26, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 1408(%rdi), %ymm20
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} ymm20 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $0, %xmm20, %zmm17, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 640(%rdi), %ymm20
-; AVX512DQBW-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20, %ymm5 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm5 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm13, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm13
-; AVX512DQBW-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 1536(%rdi), %ymm13
-; AVX512DQBW-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm13 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm14, %zmm15, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm10 = ymm10[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 128(%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, (%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, (%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 128(%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 192(%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 128(%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 192(%r9)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, (%r9)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 64(%r9)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 128(%r9)
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, 128(%rax)
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 128(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    addq $2152, %rsp # imm = 0x868
-; AVX512DQBW-SLOW-NEXT:    vzeroupper
-; AVX512DQBW-SLOW-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: load_i64_stride7_vf32:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    subq $2152, %rsp # imm = 0x868
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1664(%rdi), %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1600(%rdi), %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1216(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1152(%rdi), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1088(%rdi), %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1024(%rdi), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 704(%rdi), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 640(%rdi), %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovaps 576(%rdi), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
-; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm31, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
-; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
-; AVX512DQBW-FAST-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm18
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [12,5,12,5,12,5,12,5]
-; AVX512DQBW-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm19, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm19, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm7, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm24
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
-; AVX512DQBW-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm28, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm28, %zmm25
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [9,0,7,0,9,0,7,0]
-; AVX512DQBW-FAST-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm29, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm30
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm4, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm31, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1536(%rdi), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1472(%rdi), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm19, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm5, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm7, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm5, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm28, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm5, %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm21, %zmm29
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1280(%rdi), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1728(%rdi), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm29
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
-; AVX512DQBW-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm4, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    movb $24, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm14[4,5,4,5],zmm11[4,5,4,5]
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [14,0,0,7,14,0,0,7]
-; AVX512DQBW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm11, %zmm13
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [7,0,9,0,7,0,9,0]
-; AVX512DQBW-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm17, %zmm26
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [4,11,4,11]
-; AVX512DQBW-FAST-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm19, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm21, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[4,5,4,5],zmm22[4,5,4,5]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm23
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm17, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 960(%rdi), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 896(%rdi), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm21, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1408(%rdi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1344(%rdi), %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm8, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm6[4,5,4,5],zmm3[4,5,4,5]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm3, %zmm6, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm17, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm3, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm22[4,5,4,5]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa 912(%rdi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa 464(%rdi), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm13, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa 1360(%rdi), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $0, %xmm0, %zmm11, %zmm22
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 1024(%rdi), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,11]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 576(%rdi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 1472(%rdi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,13]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm1, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm6, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm0, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm8, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm6, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa 1088(%rdi), %ymm5
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} ymm5 = ymm19[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm19[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm12, %zmm28
-; AVX512DQBW-FAST-NEXT:    movb $-32, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm24 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm19 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm22 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 960(%rdi), %ymm6
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $0, %xmm6, %zmm25, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm6 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 512(%rdi), %ymm7
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $0, %xmm7, %zmm26, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 1408(%rdi), %ymm20
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} ymm20 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $0, %xmm20, %zmm17, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm20
-; AVX512DQBW-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20, %ymm5 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm5 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm13, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdi), %ymm13
-; AVX512DQBW-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa 1536(%rdi), %ymm13
-; AVX512DQBW-FAST-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm13 = ymm15[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm14, %zmm15, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm10 = ymm10[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, 128(%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, (%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, (%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, 128(%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, 192(%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, (%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 64(%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 128(%r8)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 192(%r9)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, (%r9)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 64(%r9)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 128(%r9)
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, 128(%rax)
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, 128(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, 192(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    addq $2152, %rsp # imm = 0x868
-; AVX512DQBW-FAST-NEXT:    vzeroupper
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512DQBW-ONLY-LABEL: load_i64_stride7_vf32:
+; AVX512DQBW-ONLY:       # %bb.0:
+; AVX512DQBW-ONLY-NEXT:    subq $2152, %rsp # imm = 0x868
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1664(%rdi), %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1600(%rdi), %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1216(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1152(%rdi), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1088(%rdi), %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1024(%rdi), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 768(%rdi), %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 704(%rdi), %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 640(%rdi), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovaps 576(%rdi), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [10,3,10,3,10,3,10,3]
+; AVX512DQBW-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm7, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm31, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [11,4,11,4,11,4,11,4]
+; AVX512DQBW-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm7 = [13,6,13,6,13,6,13,6]
+; AVX512DQBW-ONLY-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm13, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm13, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm19 = [12,5,12,5,12,5,12,5]
+; AVX512DQBW-ONLY-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm19, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm19, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm7, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [0,7,14,0,0,7,14,0]
+; AVX512DQBW-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm28, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm28, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm29 = [9,0,7,0,9,0,7,0]
+; AVX512DQBW-ONLY-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm10, %zmm29, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm4 = [2,9,2,9,2,9,2,9]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm4, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm7, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm7, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm31, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1536(%rdi), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1472(%rdi), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm7, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm19, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm5, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm7, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm5, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm28, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm5, %zmm28
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm5, %zmm21, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm4, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,9,0,5,6,9]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 832(%rdi), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1280(%rdi), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1728(%rdi), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,10,0,5,6,10]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,11,0,5,6,11]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,5,6,12,0,5,6,12]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,13,4,5,6,13]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm28
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,6,14,4,5,6,14]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [4,5,8,15,4,5,8,15]
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm4, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm4, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    movb $24, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 512(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,7,14,u>
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm24 {%k1} = zmm14[4,5,4,5],zmm11[4,5,4,5]
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [14,0,0,7,14,0,0,7]
+; AVX512DQBW-ONLY-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm11, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [7,0,9,0,7,0,9,0]
+; AVX512DQBW-ONLY-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm17, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [4,11,4,11]
+; AVX512DQBW-ONLY-NEXT:    # ymm19 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm19, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm21, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm18 {%k1} = zmm0[4,5,4,5],zmm22[4,5,4,5]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm11, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm17, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm19, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 960(%rdi), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 896(%rdi), %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm21, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1408(%rdi), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1344(%rdi), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm5, %zmm8, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm21 {%k1} = zmm6[4,5,4,5],zmm3[4,5,4,5]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm3, %zmm6, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm17, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm6, %zmm3, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm19, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm16 {%k1} = zmm1[4,5,4,5],zmm22[4,5,4,5]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm19, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 912(%rdi), %xmm1
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 464(%rdi), %xmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm13, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm23, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 1360(%rdi), %xmm0
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $0, %xmm0, %zmm11, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 1088(%rdi), %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 1024(%rdi), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,11]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 640(%rdi), %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 576(%rdi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 192(%rdi), %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm1, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 1536(%rdi), %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 1472(%rdi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,0,7,u>
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm15, %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa {{.*#+}} xmm1 = [5,12]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa {{.*#+}} xmm6 = [6,13]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm0, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm1, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm6, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm10, %zmm0, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm8, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm5, %zmm8, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm6, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm6, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 1088(%rdi), %ymm5
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} ymm5 = ymm19[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm19[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm12, %zmm28
+; AVX512DQBW-ONLY-NEXT:    movb $-32, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm18 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm24 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm16 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm4 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm3 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm0 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm12 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm19 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm23 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm22 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 960(%rdi), %ymm6
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} ymm6 = mem[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
+; AVX512DQBW-ONLY-NEXT:    vextracti128 $1, %ymm6, %xmm6
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $0, %xmm6, %zmm25, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm6 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 512(%rdi), %ymm7
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
+; AVX512DQBW-ONLY-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $0, %xmm7, %zmm26, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm7 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm9
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
+; AVX512DQBW-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm9
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $0, %xmm9, %zmm30, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 1408(%rdi), %ymm20
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} ymm20 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
+; AVX512DQBW-ONLY-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $0, %xmm20, %zmm17, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm17 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 640(%rdi), %ymm20
+; AVX512DQBW-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20, %ymm5 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm5 = mem[8,9,10,11,12,13,14,15],ymm20[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm20[16,17,18,19,20,21,22,23]
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm13, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm13
+; AVX512DQBW-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm11, %zmm13, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 1536(%rdi), %ymm13
+; AVX512DQBW-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm13[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm29, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm13 = ymm15[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm15, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm14, %zmm15, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm15, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm10 = ymm10[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm15, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, 192(%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, 128(%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm24, 64(%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, (%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, 192(%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, (%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, 64(%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, 128(%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, 192(%rcx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, (%rcx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, 64(%rcx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, 128(%rcx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, 192(%r8)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, (%r8)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, 64(%r8)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, 128(%r8)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 192(%r9)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, (%r9)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 64(%r9)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 128(%r9)
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm28, 128(%rax)
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, 128(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, 192(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, (%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    addq $2152, %rsp # imm = 0x868
+; AVX512DQBW-ONLY-NEXT:    vzeroupper
+; AVX512DQBW-ONLY-NEXT:    retq
   %wide.vec = load <224 x i64>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <224 x i64> %wide.vec, <224 x i64> poison, <32 x i32> <i32 0, i32 7, i32 14, i32 21, i32 28, i32 35, i32 42, i32 49, i32 56, i32 63, i32 70, i32 77, i32 84, i32 91, i32 98, i32 105, i32 112, i32 119, i32 126, i32 133, i32 140, i32 147, i32 154, i32 161, i32 168, i32 175, i32 182, i32 189, i32 196, i32 203, i32 210, i32 217>
   %strided.vec1 = shufflevector <224 x i64> %wide.vec, <224 x i64> poison, <32 x i32> <i32 1, i32 8, i32 15, i32 22, i32 29, i32 36, i32 43, i32 50, i32 57, i32 64, i32 71, i32 78, i32 85, i32 92, i32 99, i32 106, i32 113, i32 120, i32 127, i32 134, i32 141, i32 148, i32 155, i32 162, i32 169, i32 176, i32 183, i32 190, i32 197, i32 204, i32 211, i32 218>
@@ -12646,8 +10864,16 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
+; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY-FAST: {{.*}}
+; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll
index 884a4abfe646c3b..4738b2344255b59 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-8.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -11303,14 +11303,18 @@ define void @load_i64_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

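For readers unfamiliar with the prefix-merging mechanics behind these RUN-line changes, here is a minimal hypothetical sketch (the file contents, function name, and MERGED/SLOW/FAST prefixes are illustrative, not from this commit): both RUN lines name a shared common prefix, so whenever the slow-shuffle and fast-shuffle configurations generate identical code, update_llc_test_checks.py can emit one merged check block instead of duplicating it under two per-configuration prefixes. The trailing `{{.*}}` stubs keep otherwise-unused prefixes from tripping FileCheck, mirroring the autogenerated trailer lines visible in the diffs above.

; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=MERGED,SLOW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=MERGED,FAST

define <8 x i64> @identity(<8 x i64> %v) {
; MERGED-LABEL: identity:
; MERGED:       # %bb.0:
; MERGED-NEXT:    retq
  ret <8 x i64> %v
}
; SLOW: {{.*}}
; FAST: {{.*}}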
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll
index 06b95c94ce16d49..88ebda3622cc9b4 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-2.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -589,13 +589,17 @@ define void @load_i8_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nou
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
index 7d7b43a2234c2ba..e847933adf0abcf 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -1467,153 +1467,79 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
-; AVX512BW-ONLY-SLOW-LABEL: load_i8_stride3_vf64:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 144(%rdi), %ymm3, %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 48(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 160(%rdi), %ymm4, %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 64(%rdi), %ymm1, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 176(%rdi), %ymm5, %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 80(%rdi), %ymm2, %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm3, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm3, %zmm1, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %zmm3, %zmm2, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-SLOW-NEXT:    movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
-; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-SLOW-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, (%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512BW-ONLY-SLOW-NEXT:    retq
+; AVX512BW-ONLY-LABEL: load_i8_stride3_vf64:
+; AVX512BW-ONLY:       # %bb.0:
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm5
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 144(%rdi), %ymm3, %ymm3
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 48(%rdi), %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 160(%rdi), %ymm4, %ymm3
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 64(%rdi), %ymm1, %ymm1
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 176(%rdi), %ymm5, %ymm3
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 80(%rdi), %ymm2, %ymm2
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
+; AVX512BW-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpshufb %zmm3, %zmm0, %zmm0
+; AVX512BW-ONLY-NEXT:    vpshufb %zmm3, %zmm1, %zmm1
+; AVX512BW-ONLY-NEXT:    vpshufb %zmm3, %zmm2, %zmm2
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512BW-ONLY-NEXT:    movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
+; AVX512BW-ONLY-NEXT:    kmovq %rax, %k1
+; AVX512BW-ONLY-NEXT:    vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512BW-ONLY-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, (%rsi)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, (%rdx)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, (%rcx)
+; AVX512BW-ONLY-NEXT:    vzeroupper
+; AVX512BW-ONLY-NEXT:    retq
 ;
-; AVX512BW-ONLY-FAST-LABEL: load_i8_stride3_vf64:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 112(%rdi), %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 144(%rdi), %ymm3, %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 48(%rdi), %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 160(%rdi), %ymm4, %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 64(%rdi), %ymm1, %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 176(%rdi), %ymm5, %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 80(%rdi), %ymm2, %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb %zmm3, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb %zmm3, %zmm1, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb %zmm3, %zmm2, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-FAST-NEXT:    movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
-; AVX512BW-ONLY-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512BW-ONLY-FAST-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-SLOW-LABEL: load_i8_stride3_vf64:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm5
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 144(%rdi), %ymm3, %ymm3
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 48(%rdi), %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 160(%rdi), %ymm4, %ymm3
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 64(%rdi), %ymm1, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 176(%rdi), %ymm5, %ymm3
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 80(%rdi), %ymm2, %ymm2
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
-; AVX512DQBW-SLOW-NEXT:    # zmm3 = mem[0,1,0,1,0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm3, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm3, %zmm1, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpshufb %zmm3, %zmm2, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-SLOW-NEXT:    movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
-; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512DQBW-SLOW-NEXT:    vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-SLOW-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, (%rcx)
-; AVX512DQBW-SLOW-NEXT:    vzeroupper
-; AVX512DQBW-SLOW-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: load_i8_stride3_vf64:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa 96(%rdi), %xmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 112(%rdi), %xmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdi), %xmm5
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 144(%rdi), %ymm3, %ymm3
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 48(%rdi), %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 160(%rdi), %ymm4, %ymm3
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 64(%rdi), %ymm1, %ymm1
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 176(%rdi), %ymm5, %ymm3
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 80(%rdi), %ymm2, %ymm2
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
-; AVX512DQBW-FAST-NEXT:    # zmm3 = mem[0,1,0,1,0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpshufb %zmm3, %zmm0, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpshufb %zmm3, %zmm1, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpshufb %zmm3, %zmm2, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-FAST-NEXT:    movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
-; AVX512DQBW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512DQBW-FAST-NEXT:    vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
-; AVX512DQBW-FAST-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, (%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, (%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, (%rcx)
-; AVX512DQBW-FAST-NEXT:    vzeroupper
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512DQBW-ONLY-LABEL: load_i8_stride3_vf64:
+; AVX512DQBW-ONLY:       # %bb.0:
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm5
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 144(%rdi), %ymm3, %ymm3
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 48(%rdi), %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 160(%rdi), %ymm4, %ymm3
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 64(%rdi), %ymm1, %ymm1
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 176(%rdi), %ymm5, %ymm3
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 80(%rdi), %ymm2, %ymm2
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
+; AVX512DQBW-ONLY-NEXT:    # zmm3 = mem[0,1,0,1,0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpshufb %zmm3, %zmm0, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpshufb %zmm3, %zmm1, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpshufb %zmm3, %zmm2, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512DQBW-ONLY-NEXT:    movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
+; AVX512DQBW-ONLY-NEXT:    kmovq %rax, %k1
+; AVX512DQBW-ONLY-NEXT:    vpblendmb %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm3[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm3[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm3[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm3[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512DQBW-ONLY-NEXT:    vpalignr {{.*#+}} zmm0 = zmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,42,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,58,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, (%rsi)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, (%rdx)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, (%rcx)
+; AVX512DQBW-ONLY-NEXT:    vzeroupper
+; AVX512DQBW-ONLY-NEXT:    retq
   %wide.vec = load <192 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <64 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45, i32 48, i32 51, i32 54, i32 57, i32 60, i32 63, i32 66, i32 69, i32 72, i32 75, i32 78, i32 81, i32 84, i32 87, i32 90, i32 93, i32 96, i32 99, i32 102, i32 105, i32 108, i32 111, i32 114, i32 117, i32 120, i32 123, i32 126, i32 129, i32 132, i32 135, i32 138, i32 141, i32 144, i32 147, i32 150, i32 153, i32 156, i32 159, i32 162, i32 165, i32 168, i32 171, i32 174, i32 177, i32 180, i32 183, i32 186, i32 189>
   %strided.vec1 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <64 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22, i32 25, i32 28, i32 31, i32 34, i32 37, i32 40, i32 43, i32 46, i32 49, i32 52, i32 55, i32 58, i32 61, i32 64, i32 67, i32 70, i32 73, i32 76, i32 79, i32 82, i32 85, i32 88, i32 91, i32 94, i32 97, i32 100, i32 103, i32 106, i32 109, i32 112, i32 115, i32 118, i32 121, i32 124, i32 127, i32 130, i32 133, i32 136, i32 139, i32 142, i32 145, i32 148, i32 151, i32 154, i32 157, i32 160, i32 163, i32 166, i32 169, i32 172, i32 175, i32 178, i32 181, i32 184, i32 187, i32 190>
@@ -1632,10 +1558,16 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

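The `%wide.vec`/`%strided.vec*` IR checked above is what LoopVectorizer's interleaved-access lowering produces. As a hedged sketch of where such input comes from (names and loop shape are illustrative, assuming %n >= 1), it corresponds to a scalar loop walking memory with a stride-3 access pattern:

define void @deinterleave_stride3(ptr %in, ptr %out, i64 %n) {
entry:
  br label %loop
loop:
  ; read every 3rd byte: in[3*i] -> out[i]
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %idx = mul i64 %i, 3
  %p = getelementptr inbounds i8, ptr %in, i64 %idx
  %v = load i8, ptr %p, align 1
  %q = getelementptr inbounds i8, ptr %out, i64 %i
  store i8 %v, ptr %q, align 1
  %i.next = add nuw i64 %i, 1
  %done = icmp eq i64 %i.next, %n
  br i1 %done, label %exit, label %loop
exit:
  ret void
}

Vectorized by a factor of 64, the three adjacent bytes per iteration become one <192 x i8> wide load followed by strided shufflevectors, exactly as in the checked IR above.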
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
index 2d66e4d68dbfb93..c9cbb0994810f32 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -2082,14 +2082,18 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
index f0b7d41eb299598..7947dd0ff373c8d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -4262,14 +4262,18 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index f6c22d9fb7a103d..a2ad944c66f42e5 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -5043,14 +5043,18 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
index 39875a96326dd98..2e305467e0c22da 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -12348,6 +12348,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512: {{.*}}
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
+; AVX512BW-ONLY: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
index e4acb33faade0e5..f2133b9e42d30df 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved loads.
 
@@ -13262,12 +13262,16 @@ define void @load_i8_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2: {{.*}}
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

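(As a hedged illustration of the "interleaved loads" comment in these tests — a minimal stride-2 sketch with hypothetical names, not copied from the test file: LoopVectorizer emits one wide load followed by shufflevector de-interleaves,

define void @load_i32_stride2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) {
  ; one wide load covering both interleaved sequences
  %wide.vec = load <8 x i32>, ptr %in.vec, align 32
  ; de-interleave the even and odd elements
  %strided.vec0 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %strided.vec1 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  store <4 x i32> %strided.vec0, ptr %out.vec0, align 16
  store <4 x i32> %strided.vec1, ptr %out.vec1, align 16
  ret void
}

and the SLOW/FAST runs differ only in how the backend lowers those shuffles.)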
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll
index 5cd794b7f1c4dc9..9fee32dd21f638f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-2.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -574,14 +574,18 @@ define void @store_i16_stride2_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.v
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

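(Similarly for the "interleaved stores" comment — a hedged stride-2 sketch, again with hypothetical names: the vectorizer concatenates and interleaves the input vectors with a single shufflevector before one wide store,

define void @store_i32_stride2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) {
  %in.vec0 = load <4 x i32>, ptr %in.vecptr0, align 16
  %in.vec1 = load <4 x i32>, ptr %in.vecptr1, align 16
  ; interleave the two sources: a0,b0,a1,b1,...
  %interleaved.vec = shufflevector <4 x i32> %in.vec0, <4 x i32> %in.vec1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  store <8 x i32> %interleaved.vec, ptr %out.vec, align 32
  ret void
}
)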
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index f107e762ac311be..e6de73d602b7319 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -2533,13 +2533,17 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
index 74cd79ab551e2a7..e8da0c4dcf47a41 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -3082,12 +3082,16 @@ define void @store_i16_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index dc7fb6abe419d5a..9f479b0ee99377f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -6435,13 +6435,17 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index 4522be032322c9a..7133214d9a80f76 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -9461,12 +9461,16 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index 006edc71450157a..810ff07eafb6387 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -14520,10 +14520,14 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512: {{.*}}
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
index 1cb76e97601528b..c02348af872917b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -7808,13 +7808,17 @@ define void @store_i16_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-2.ll
index 0a59406d865e1ed..3a37970bee92548 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-2.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -809,15 +809,19 @@ define void @store_i32_stride2_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.v
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
index 5ff48a44c347a68..1fc25ee32933320 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -2803,15 +2803,19 @@ define void @store_i32_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
index 31e462a139561bd..a4480d5d7e957c1 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -2948,14 +2948,18 @@ define void @store_i32_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1: {{.*}}
 ; AVX2: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
index f0c2f3f23a2d038..1073c24b228ba02 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -6960,14 +6960,18 @@ define void @store_i32_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

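For readers following the RUN lines above: FileCheck accepts several --check-prefixes per invocation, ordered from most generic to most specific, and update_llc_test_checks.py emits each check block under the most specific prefix shared by every RUN line that produces identical code. The new AVX512F/AVX512DQ/AVX512BW/AVX512DQBW-ONLY prefixes sit between the per-feature and per-tuning (SLOW/FAST) levels, so a SLOW/FAST pair that codegens identically can share one block. A minimal sketch, with a hypothetical function that is not part of this patch:

; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512F-ONLY
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512F-ONLY
; Both runs produce the same code here, so the update script can emit one
; AVX512F-ONLY block instead of separate AVX512F-ONLY-SLOW/-FAST blocks:
; AVX512F-ONLY-LABEL: add_v8i64:
; AVX512F-ONLY:       vpaddq %zmm1, %zmm0, %zmm0
; AVX512F-ONLY-NEXT:  retq
define <8 x i64> @add_v8i64(<8 x i64> %a, <8 x i64> %b) {
  %r = add <8 x i64> %a, %b
  ret <8 x i64> %r
}
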
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index 6e09eba1de1bc9d..299d74fdf5f1771 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -9542,13 +9542,17 @@ define void @store_i32_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
index 33c9d9e182c34c5..d24abc59341a5cb 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -12447,14 +12447,18 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2: {{.*}}
 ; AVX2-ONLY: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

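A note on the "; PREFIX: {{.*}}" blocks being extended here: update_llc_test_checks.py appends one of these wildcard lines for every requested prefix that ends up with no dedicated checks, its output being fully covered by a more generic prefix. Without at least one check string per prefix, FileCheck rejects the RUN line with its "no check strings found" error, so each newly added common prefix gets a placeholder wherever it is not yet used, e.g.:

; AVX512F-ONLY: {{.*}}
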
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
index 35d12ffc3a8d306..85ffcdf71e3cbf3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -8102,14 +8102,18 @@ define void @store_i32_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll
index 57e030ff2a1cee0..488753b727587ee 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-2.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -1582,15 +1582,19 @@ define void @store_i64_stride2_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.v
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
index 55c8cfb9b49566b..3ffcf6d6d6f5ad2 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -2704,15 +2704,19 @@ define void @store_i64_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
index 80ae5f93ea66d8c..5d7df755956295e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -4264,14 +4264,18 @@ define void @store_i64_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
index fcc1958a46ed497..99da0be38c2b49b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -6187,14 +6187,18 @@ define void @store_i64_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

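As the recurring comment notes, the function bodies model LoopVectorizer output for an interleave group: load each input vector, merge them with a single wide shufflevector, and perform one contiguous store. A minimal sketch of the shape, using a hypothetical vf4 case (far smaller than the vf16/vf64 functions in the hunks below):

define void @store_i64_stride2_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) {
  %in.vec0 = load <4 x i64>, ptr %in.vecptr0, align 64
  %in.vec1 = load <4 x i64>, ptr %in.vecptr1, align 64
  ; lane order a0,b0,a1,b1,... so each stride-2 group lands contiguously in memory
  %interleaved.vec = shufflevector <4 x i64> %in.vec0, <4 x i64> %in.vec1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  store <8 x i64> %interleaved.vec, ptr %out.vec, align 64
  ret void
}
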
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
index 12c18c325e5dc25..e68da022c1863f0 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -1442,1125 +1442,565 @@ define void @store_i64_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-SLOW-LABEL: store_i64_stride6_vf16:
-; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    movb $12, %r10b
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    movb $16, %r10b
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    movb $48, %r9b
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %r9d, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %xmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %xmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [7,15,7,15]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 448(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 512(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 576(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 704(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512F-ONLY-SLOW-NEXT:    retq
+; AVX512F-ONLY-LABEL: store_i64_stride6_vf16:
+; AVX512F-ONLY:       # %bb.0:
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r8), %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm8
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
+; AVX512F-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
+; AVX512F-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
+; AVX512F-ONLY-NEXT:    movb $12, %r10b
+; AVX512F-ONLY-NEXT:    kmovw %r10d, %k1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
+; AVX512F-ONLY-NEXT:    movb $16, %r10b
+; AVX512F-ONLY-NEXT:    kmovw %r10d, %k2
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r9), %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm16
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
+; AVX512F-ONLY-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm9
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
+; AVX512F-ONLY-NEXT:    movb $48, %r9b
+; AVX512F-ONLY-NEXT:    kmovw %r9d, %k2
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
+; AVX512F-ONLY-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
+; AVX512F-ONLY-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
+; AVX512F-ONLY-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdx), %xmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdx), %xmm21
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
+; AVX512F-ONLY-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm21
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [7,15,7,15]
+; AVX512F-ONLY-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
+; AVX512F-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
+; AVX512F-ONLY-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdi), %ymm25
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
+; AVX512F-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, 256(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm15, 448(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, 512(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, 576(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, 640(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, 384(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, (%rax)
+; AVX512F-ONLY-NEXT:    vzeroupper
+; AVX512F-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-FAST-LABEL: store_i64_stride6_vf16:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
-; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
-; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    movb $12, %r10b
-; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    movb $16, %r10b
-; AVX512F-ONLY-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    movb $48, %r9b
-; AVX512F-ONLY-FAST-NEXT:    kmovw %r9d, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
-; AVX512F-ONLY-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
-; AVX512F-ONLY-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
-; AVX512F-ONLY-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %xmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
-; AVX512F-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [7,15,7,15]
-; AVX512F-ONLY-FAST-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
-; AVX512F-ONLY-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 448(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 512(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 576(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 704(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vzeroupper
-; AVX512F-ONLY-FAST-NEXT:    retq
+; AVX512DQ-ONLY-LABEL: store_i64_stride6_vf16:
+; AVX512DQ-ONLY:       # %bb.0:
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r8), %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm8
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
+; AVX512DQ-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
+; AVX512DQ-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
+; AVX512DQ-ONLY-NEXT:    movb $12, %r10b
+; AVX512DQ-ONLY-NEXT:    kmovw %r10d, %k1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
+; AVX512DQ-ONLY-NEXT:    movb $16, %r10b
+; AVX512DQ-ONLY-NEXT:    kmovw %r10d, %k2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r9), %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm16
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-ONLY-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
+; AVX512DQ-ONLY-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
+; AVX512DQ-ONLY-NEXT:    movb $48, %r9b
+; AVX512DQ-ONLY-NEXT:    kmovw %r9d, %k2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
+; AVX512DQ-ONLY-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
+; AVX512DQ-ONLY-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
+; AVX512DQ-ONLY-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdx), %xmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdx), %xmm21
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
+; AVX512DQ-ONLY-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm21
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm22 = [7,15,7,15]
+; AVX512DQ-ONLY-NEXT:    # ymm22 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
+; AVX512DQ-ONLY-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdi), %ymm25
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, 256(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm15, 448(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, 512(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, 576(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, 640(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, 384(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, (%rax)
+; AVX512DQ-ONLY-NEXT:    vzeroupper
+; AVX512DQ-ONLY-NEXT:    retq
 ;
-; AVX512DQ-SLOW-LABEL: store_i64_stride6_vf16:
-; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm8
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
-; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
-; AVX512DQ-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
-; AVX512DQ-SLOW-NEXT:    movb $12, %r10b
-; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    movb $16, %r10b
-; AVX512DQ-SLOW-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm16
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
-; AVX512DQ-SLOW-NEXT:    movb $48, %r9b
-; AVX512DQ-SLOW-NEXT:    kmovw %r9d, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
-; AVX512DQ-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
-; AVX512DQ-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
-; AVX512DQ-SLOW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %xmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %xmm21
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
-; AVX512DQ-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm22 = [7,15,7,15]
-; AVX512DQ-SLOW-NEXT:    # ymm22 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
-; AVX512DQ-SLOW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, 448(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 512(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 576(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 704(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512DQ-SLOW-NEXT:    vzeroupper
-; AVX512DQ-SLOW-NEXT:    retq
+; AVX512BW-ONLY-LABEL: store_i64_stride6_vf16:
+; AVX512BW-ONLY:       # %bb.0:
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r8), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm8
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
+; AVX512BW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
+; AVX512BW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
+; AVX512BW-ONLY-NEXT:    movb $12, %r10b
+; AVX512BW-ONLY-NEXT:    kmovd %r10d, %k1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
+; AVX512BW-ONLY-NEXT:    movb $16, %r10b
+; AVX512BW-ONLY-NEXT:    kmovd %r10d, %k2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r9), %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm16
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
+; AVX512BW-ONLY-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
+; AVX512BW-ONLY-NEXT:    movb $48, %r9b
+; AVX512BW-ONLY-NEXT:    kmovd %r9d, %k2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
+; AVX512BW-ONLY-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
+; AVX512BW-ONLY-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
+; AVX512BW-ONLY-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdx), %xmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdx), %xmm21
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
+; AVX512BW-ONLY-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm21
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [7,15,7,15]
+; AVX512BW-ONLY-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
+; AVX512BW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
+; AVX512BW-ONLY-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %ymm25
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
+; AVX512BW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, 256(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, 448(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, 512(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, 576(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, 640(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, 384(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, (%rax)
+; AVX512BW-ONLY-NEXT:    vzeroupper
+; AVX512BW-ONLY-NEXT:    retq
 ;
-; AVX512DQ-FAST-LABEL: store_i64_stride6_vf16:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm8
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
-; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
-; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
-; AVX512DQ-FAST-NEXT:    movb $12, %r10b
-; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    movb $16, %r10b
-; AVX512DQ-FAST-NEXT:    kmovw %r10d, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm16
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
-; AVX512DQ-FAST-NEXT:    movb $48, %r9b
-; AVX512DQ-FAST-NEXT:    kmovw %r9d, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
-; AVX512DQ-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
-; AVX512DQ-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
-; AVX512DQ-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %xmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %xmm21
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
-; AVX512DQ-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm22 = [7,15,7,15]
-; AVX512DQ-FAST-NEXT:    # ymm22 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
-; AVX512DQ-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 512(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 704(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512DQ-FAST-NEXT:    vzeroupper
-; AVX512DQ-FAST-NEXT:    retq
-;
-; AVX512BW-ONLY-SLOW-LABEL: store_i64_stride6_vf16:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    movb $12, %r10b
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    movb $16, %r10b
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    movb $48, %r9b
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r9d, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %xmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [7,15,7,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 448(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 512(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 576(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 704(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 384(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512BW-ONLY-SLOW-NEXT:    retq
-;
-; AVX512BW-ONLY-FAST-LABEL: store_i64_stride6_vf16:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    movb $12, %r10b
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    movb $16, %r10b
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    movb $48, %r9b
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %r9d, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %xmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %xmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [7,15,7,15]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 448(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 512(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 576(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 704(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 384(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-SLOW-LABEL: store_i64_stride6_vf16:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
-; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
-; AVX512DQBW-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
-; AVX512DQBW-SLOW-NEXT:    movb $12, %r10b
-; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    movb $16, %r10b
-; AVX512DQBW-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm16
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
-; AVX512DQBW-SLOW-NEXT:    movb $48, %r9b
-; AVX512DQBW-SLOW-NEXT:    kmovd %r9d, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
-; AVX512DQBW-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
-; AVX512DQBW-SLOW-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
-; AVX512DQBW-SLOW-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %xmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %xmm21
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
-; AVX512DQBW-SLOW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm22 = [7,15,7,15]
-; AVX512DQBW-SLOW-NEXT:    # ymm22 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
-; AVX512DQBW-SLOW-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, 448(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 512(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, 576(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, 704(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, 384(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512DQBW-SLOW-NEXT:    vzeroupper
-; AVX512DQBW-SLOW-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: store_i64_stride6_vf16:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm8
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
-; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
-; AVX512DQBW-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
-; AVX512DQBW-FAST-NEXT:    movb $12, %r10b
-; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
-; AVX512DQBW-FAST-NEXT:    movb $16, %r10b
-; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm16
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
-; AVX512DQBW-FAST-NEXT:    movb $48, %r9b
-; AVX512DQBW-FAST-NEXT:    kmovd %r9d, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
-; AVX512DQBW-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
-; AVX512DQBW-FAST-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
-; AVX512DQBW-FAST-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %xmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %xmm21
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
-; AVX512DQBW-FAST-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm22 = [7,15,7,15]
-; AVX512DQBW-FAST-NEXT:    # ymm22 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
-; AVX512DQBW-FAST-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, 256(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, 448(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 512(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, 576(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, 704(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, 384(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, (%rax)
-; AVX512DQBW-FAST-NEXT:    vzeroupper
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512DQBW-ONLY-LABEL: store_i64_stride6_vf16:
+; AVX512DQBW-ONLY:       # %bb.0:
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r8), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
+; AVX512DQBW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm0, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [4,12,4,12]
+; AVX512DQBW-ONLY-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm9, %zmm10
+; AVX512DQBW-ONLY-NEXT:    movb $12, %r10b
+; AVX512DQBW-ONLY-NEXT:    kmovd %r10d, %k1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm1 {%k1}
+; AVX512DQBW-ONLY-NEXT:    movb $16, %r10b
+; AVX512DQBW-ONLY-NEXT:    kmovd %r10d, %k2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm14, %zmm13, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm11, %zmm7, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r9), %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm16
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-ONLY-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm18, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [5,13,6,14,5,13,6,14]
+; AVX512DQBW-ONLY-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm12, %zmm9
+; AVX512DQBW-ONLY-NEXT:    movb $48, %r9b
+; AVX512DQBW-ONLY-NEXT:    kmovd %r9d, %k2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm9 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <0,1,13,u,4,5,6,7>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm19, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,13,4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm20, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [2,10,2,10,2,10,2,10]
+; AVX512DQBW-ONLY-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm21, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm17 = [1,9,2,10,1,9,2,10]
+; AVX512DQBW-ONLY-NEXT:    # zmm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm17, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,1,9,u,4,5,6,7>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm22, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,9,4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm23, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm6, %zmm5, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm12 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm19, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm6, %zmm5, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm17 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm22, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm10, %zmm23, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm19 = [0,8,1,9,0,8,1,9]
+; AVX512DQBW-ONLY-NEXT:    # zmm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm19, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdx), %xmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdx), %xmm21
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm20 = xmm20[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm20, %zmm0, %zmm18 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm18, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,1,2,3,4,8,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm10, %zmm20, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm14, %zmm13, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm21 = xmm21[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm21, %zmm0, %zmm19 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm19, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm20, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [7,15,7,15,7,15,7,15]
+; AVX512DQBW-ONLY-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm20, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm22 = [7,15,7,15]
+; AVX512DQBW-ONLY-NEXT:    # ymm22 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm22, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm21[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <14,u,2,3,4,5,15,u>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm14, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,14,2,3,4,5,6,15]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm21, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,2,3,4,12,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm23, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm24 = [3,11,3,11,3,11,3,11]
+; AVX512DQBW-ONLY-NEXT:    # zmm24 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm24, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %ymm25
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm25 = ymm25[1],mem[1],ymm25[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm25, %zmm7, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <10,u,2,3,4,5,11,u>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm25, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [0,10,2,3,4,5,6,11]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm8, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm4, %zmm2, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm22, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm20[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm14, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm10, %zmm21, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm10, %zmm23, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm24, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm11[1],mem[1],ymm11[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm2, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm25, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm10, %zmm8, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, 256(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, 448(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, 512(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, 576(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, 640(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, 384(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, (%rax)
+; AVX512DQBW-ONLY-NEXT:    vzeroupper
+; AVX512DQBW-ONLY-NEXT:    retq
   %in.vec0 = load <16 x i64>, ptr %in.vecptr0, align 64
   %in.vec1 = load <16 x i64>, ptr %in.vecptr1, align 64
   %in.vec2 = load <16 x i64>, ptr %in.vecptr2, align 64
@@ -3989,2237 +3429,1121 @@ define void @store_i64_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-SLOW-LABEL: store_i64_stride6_vf32:
-; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,12,4,12]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,7,15]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    movb $12, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    movb $48, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    movb $16, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 960(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 896(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 832(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, 704(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 512(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 448(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $712, %rsp # imm = 0x2C8
-; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512F-ONLY-SLOW-NEXT:    retq
-;
-; AVX512F-ONLY-FAST-LABEL: store_i64_stride6_vf32:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
-; AVX512F-ONLY-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,12,4,12]
-; AVX512F-ONLY-FAST-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
-; AVX512F-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
-; AVX512F-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
-; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
-; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,7,15]
-; AVX512F-ONLY-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    movb $12, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    movb $48, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    movb $16, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 960(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 896(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 832(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 704(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 512(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 448(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $712, %rsp # imm = 0x2C8
-; AVX512F-ONLY-FAST-NEXT:    vzeroupper
-; AVX512F-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQ-SLOW-LABEL: store_i64_stride6_vf32:
-; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm25
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
-; AVX512DQ-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,12,4,12]
-; AVX512DQ-SLOW-NEXT:    # ymm20 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
-; AVX512DQ-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
-; AVX512DQ-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
-; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
-; AVX512DQ-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
-; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,7,15]
-; AVX512DQ-SLOW-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    movb $12, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
-; AVX512DQ-SLOW-NEXT:    movb $48, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    movb $16, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, 960(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 896(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, 832(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, 704(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 512(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, 448(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, (%rax)
-; AVX512DQ-SLOW-NEXT:    addq $712, %rsp # imm = 0x2C8
-; AVX512DQ-SLOW-NEXT:    vzeroupper
-; AVX512DQ-SLOW-NEXT:    retq
+; AVX512F-ONLY-LABEL: store_i64_stride6_vf32:
+; AVX512F-ONLY:       # %bb.0:
+; AVX512F-ONLY-NEXT:    subq $712, %rsp # imm = 0x2C8
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm29
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm25
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
+; AVX512F-ONLY-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,12,4,12]
+; AVX512F-ONLY-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
+; AVX512F-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
+; AVX512F-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm17
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm16
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
+; AVX512F-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512F-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm25
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
+; AVX512F-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm23
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm24
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
+; AVX512F-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm30
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,7,15]
+; AVX512F-ONLY-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm7
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    movb $12, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
+; AVX512F-ONLY-NEXT:    movb $48, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k2
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r8), %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r9), %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm21
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm19
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm27
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rdx), %xmm0
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rdx), %xmm0
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rdx), %xmm0
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    movb $16, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdi), %ymm23
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm1
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm3
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, 960(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, 896(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm15, 832(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, 704(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, 640(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, 576(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, 512(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, 448(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, 320(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, 256(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, 192(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, 128(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, 768(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, 384(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm24, (%rax)
+; AVX512F-ONLY-NEXT:    addq $712, %rsp # imm = 0x2C8
+; AVX512F-ONLY-NEXT:    vzeroupper
+; AVX512F-ONLY-NEXT:    retq
 ;
-; AVX512DQ-FAST-LABEL: store_i64_stride6_vf32:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm25
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
-; AVX512DQ-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,12,4,12]
-; AVX512DQ-FAST-NEXT:    # ymm20 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
-; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
-; AVX512DQ-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
-; AVX512DQ-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
-; AVX512DQ-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
-; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm30
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,7,15]
-; AVX512DQ-FAST-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    movb $12, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
-; AVX512DQ-FAST-NEXT:    movb $48, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm21
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    movb $16, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 960(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 896(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, 832(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, 704(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 512(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, (%rax)
-; AVX512DQ-FAST-NEXT:    addq $712, %rsp # imm = 0x2C8
-; AVX512DQ-FAST-NEXT:    vzeroupper
-; AVX512DQ-FAST-NEXT:    retq
+; AVX512DQ-ONLY-LABEL: store_i64_stride6_vf32:
+; AVX512DQ-ONLY:       # %bb.0:
+; AVX512DQ-ONLY-NEXT:    subq $712, %rsp # imm = 0x2C8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm29
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm25
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
+; AVX512DQ-ONLY-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,12,4,12]
+; AVX512DQ-ONLY-NEXT:    # ymm20 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
+; AVX512DQ-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
+; AVX512DQ-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm17
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
+; AVX512DQ-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512DQ-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
+; AVX512DQ-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm23
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-ONLY-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
+; AVX512DQ-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm30
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,7,15]
+; AVX512DQ-ONLY-NEXT:    # ymm19 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm7
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    movb $12, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
+; AVX512DQ-ONLY-NEXT:    movb $48, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r8), %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r9), %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm21
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm19
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm27
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rdx), %xmm0
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rdx), %xmm0
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rdx), %xmm0
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    movb $16, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdi), %ymm23
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm1
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, 960(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, 896(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm15, 832(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, 704(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, 640(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, 576(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, 512(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, 448(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, 320(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, 256(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, 192(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, 128(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, 768(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, 384(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm24, (%rax)
+; AVX512DQ-ONLY-NEXT:    addq $712, %rsp # imm = 0x2C8
+; AVX512DQ-ONLY-NEXT:    vzeroupper
+; AVX512DQ-ONLY-NEXT:    retq
 ;
-; AVX512BW-ONLY-SLOW-LABEL: store_i64_stride6_vf32:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,12,4,12]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,7,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    movb $12, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    movb $48, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    movb $16, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 960(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 896(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 832(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, 704(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 512(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 448(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 256(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 192(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    addq $712, %rsp # imm = 0x2C8
-; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512BW-ONLY-SLOW-NEXT:    retq
+; AVX512BW-ONLY-LABEL: store_i64_stride6_vf32:
+; AVX512BW-ONLY:       # %bb.0:
+; AVX512BW-ONLY-NEXT:    subq $712, %rsp # imm = 0x2C8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm25
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
+; AVX512BW-ONLY-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,12,4,12]
+; AVX512BW-ONLY-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
+; AVX512BW-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
+; AVX512BW-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
+; AVX512BW-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512BW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
+; AVX512BW-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm23
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
+; AVX512BW-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm30
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,7,15]
+; AVX512BW-ONLY-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm7
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    movb $12, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
+; AVX512BW-ONLY-NEXT:    movb $48, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r8), %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r9), %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm21
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm19
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rdx), %xmm0
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdx), %xmm0
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rdx), %xmm0
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    movb $16, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %ymm23
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm1
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm3
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, 960(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, 896(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, 832(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, 704(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, 640(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, 576(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, 512(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, 448(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, 320(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, 256(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, 192(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, 128(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, 768(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, 384(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm24, (%rax)
+; AVX512BW-ONLY-NEXT:    addq $712, %rsp # imm = 0x2C8
+; AVX512BW-ONLY-NEXT:    vzeroupper
+; AVX512BW-ONLY-NEXT:    retq
 ;
-; AVX512BW-ONLY-FAST-LABEL: store_i64_stride6_vf32:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [4,12,4,12]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [7,15,7,15]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    movb $12, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    movb $48, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    movb $16, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 960(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 896(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 832(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 704(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 512(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 448(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 256(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 192(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    addq $712, %rsp # imm = 0x2C8
-; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-SLOW-LABEL: store_i64_stride6_vf32:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm25
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
-; AVX512DQBW-SLOW-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,12,4,12]
-; AVX512DQBW-SLOW-NEXT:    # ymm20 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
-; AVX512DQBW-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
-; AVX512DQBW-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
-; AVX512DQBW-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
-; AVX512DQBW-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
-; AVX512DQBW-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,7,15]
-; AVX512DQBW-SLOW-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    movb $12, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
-; AVX512DQBW-SLOW-NEXT:    movb $48, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    movb $16, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, 960(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 896(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, 832(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, 704(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, 512(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, 448(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, 256(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 192(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, (%rax)
-; AVX512DQBW-SLOW-NEXT:    addq $712, %rsp # imm = 0x2C8
-; AVX512DQBW-SLOW-NEXT:    vzeroupper
-; AVX512DQBW-SLOW-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: store_i64_stride6_vf32:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    subq $712, %rsp # imm = 0x2C8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm25
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
-; AVX512DQBW-FAST-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,12,4,12]
-; AVX512DQBW-FAST-NEXT:    # ymm20 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
-; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
-; AVX512DQBW-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
-; AVX512DQBW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm29
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
-; AVX512DQBW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm23
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
-; AVX512DQBW-FAST-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,7,15]
-; AVX512DQBW-FAST-NEXT:    # ymm19 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    movb $12, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
-; AVX512DQBW-FAST-NEXT:    movb $48, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r8), %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdx), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    movb $16, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, 960(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 896(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, 832(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, 704(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, 512(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, 448(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, 256(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 192(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 768(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, (%rax)
-; AVX512DQBW-FAST-NEXT:    addq $712, %rsp # imm = 0x2C8
-; AVX512DQBW-FAST-NEXT:    vzeroupper
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512DQBW-ONLY-LABEL: store_i64_stride6_vf32:
+; AVX512DQBW-ONLY:       # %bb.0:
+; AVX512DQBW-ONLY-NEXT:    subq $712, %rsp # imm = 0x2C8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm25
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm11 = [4,12,5,13,4,12,5,13]
+; AVX512DQBW-ONLY-NEXT:    # zmm11 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm20 = [4,12,4,12]
+; AVX512DQBW-ONLY-NEXT:    # ymm20 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm25, %zmm20, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm20, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm20, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm29, %zmm8, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,9,2,10,1,9,2,10]
+; AVX512DQBW-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm13 = [5,13,6,14,5,13,6,14]
+; AVX512DQBW-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm13, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm10, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm13, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm10, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm13, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm28 = [3,11,3,11,3,11,3,11]
+; AVX512DQBW-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [7,15,7,15,7,15,7,15]
+; AVX512DQBW-ONLY-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm0, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm28, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm0, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm28, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm0, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm11, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm25, %zmm12, %zmm28
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm25, %zmm0, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm11, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm11, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm22, %zmm7, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [2,10,2,10,2,10,2,10]
+; AVX512DQBW-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-ONLY-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm6, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,8,1,9,0,8,1,9]
+; AVX512DQBW-ONLY-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm2, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm19 = [7,15,7,15]
+; AVX512DQBW-ONLY-NEXT:    # ymm19 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm19, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm6, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm2, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm19, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm19, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm18, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm19, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    movb $12, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm26 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm9 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm25 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm11 {%k1}
+; AVX512DQBW-ONLY-NEXT:    movb $48, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm17 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm31 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm14 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r8), %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm16 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm10 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,9,u,4,5,6,7>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm23, %zmm0, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm0, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm0, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm0, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r9), %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,9,4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm19
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm13 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,1,13,u,4,5,6,7>
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm23, %zmm0, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm0, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm0, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm0, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,13,4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm30 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rdx), %xmm0
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm8 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdx), %xmm0
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm7 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rdx), %xmm0
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm0, %zmm2 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm30, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,8,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm0, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm8, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm0, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 128(%r8), %zmm7, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm0, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 192(%r8), %zmm2, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm0, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm2 = zmm2[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    movb $16, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm11 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <10,u,2,3,4,5,11,u>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm23, %zmm7, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm17 = <14,u,2,3,4,5,15,u>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm23, %zmm17, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %ymm23
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm23 = ymm23[1],mem[1],ymm23[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm23, %zmm29, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm8 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm29 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm29 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm25 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm7, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm17, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm30 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm30 = zmm8[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm9 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm7, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm17, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm28, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm7, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm26 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm12[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,10,2,3,4,5,6,11]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm17, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [0,1,2,3,4,12,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm17, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,14,2,3,4,5,6,15]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm22, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm28, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm17, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm22, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm28, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm17, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm22, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm28, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm17, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm22, %zmm7
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, 1472(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, 1408(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, 1344(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, 1280(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, 1216(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm30, 1088(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, 1024(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, 960(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, 896(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, 832(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, 704(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, 640(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, 576(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, 512(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, 448(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, 320(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, 256(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, 192(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, 128(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, 1152(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, 768(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, 384(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm24, (%rax)
+; AVX512DQBW-ONLY-NEXT:    addq $712, %rsp # imm = 0x2C8
+; AVX512DQBW-ONLY-NEXT:    vzeroupper
+; AVX512DQBW-ONLY-NEXT:    retq
   %in.vec0 = load <32 x i64>, ptr %in.vecptr0, align 64
   %in.vec1 = load <32 x i64>, ptr %in.vecptr1, align 64
   %in.vec2 = load <32 x i64>, ptr %in.vecptr2, align 64
@@ -9192,5421 +7516,2713 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-SLOW-LABEL: store_i64_stride6_vf64:
-; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [7,15,7,15]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    movb $12, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    movb $48, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    movb $16, %al
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm8, 2944(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm6, 2752(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm6, 2560(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm5, 2368(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm5, 2176(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm4, 1984(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm3, 1600(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm2, 1216(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 960(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 896(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm1, 832(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm1, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 576(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 512(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $3400, %rsp # imm = 0xD48
-; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512F-ONLY-SLOW-NEXT:    retq
-;
-; AVX512F-ONLY-FAST-LABEL: store_i64_stride6_vf64:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
-; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
-; AVX512F-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
-; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
-; AVX512F-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
-; AVX512F-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
-; AVX512F-ONLY-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [7,15,7,15]
-; AVX512F-ONLY-FAST-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    movb $12, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    movb $48, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r8), %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r8), %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    movb $16, %al
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r9), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r9), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm8, 2944(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm6, 2752(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm6, 2560(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm5, 2368(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm5, 2176(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm4, 1984(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm3, 1600(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm2, 1216(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 960(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 896(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, 832(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, 640(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 576(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 512(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $3400, %rsp # imm = 0xD48
-; AVX512F-ONLY-FAST-NEXT:    vzeroupper
-; AVX512F-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQ-SLOW-LABEL: store_i64_stride6_vf64:
-; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm21
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
-; AVX512DQ-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
-; AVX512DQ-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
-; AVX512DQ-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
-; AVX512DQ-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm11
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
-; AVX512DQ-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
-; AVX512DQ-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
-; AVX512DQ-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm23 = [7,15,7,15]
-; AVX512DQ-SLOW-NEXT:    # ymm23 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    movb $12, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
-; AVX512DQ-SLOW-NEXT:    movb $48, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm9
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm11
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    movb $16, %al
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm8, 2944(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm6, 2752(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm6, 2560(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm5, 2368(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm5, 2176(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm4, 1984(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, 1600(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, 1216(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 960(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 896(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm1, 832(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm1, 640(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, 576(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 512(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512DQ-SLOW-NEXT:    addq $3400, %rsp # imm = 0xD48
-; AVX512DQ-SLOW-NEXT:    vzeroupper
-; AVX512DQ-SLOW-NEXT:    retq
-;
-; AVX512DQ-FAST-LABEL: store_i64_stride6_vf64:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm21
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
-; AVX512DQ-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
-; AVX512DQ-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
-; AVX512DQ-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm11
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
-; AVX512DQ-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
-; AVX512DQ-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
-; AVX512DQ-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm23 = [7,15,7,15]
-; AVX512DQ-FAST-NEXT:    # ymm23 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    movb $12, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
-; AVX512DQ-FAST-NEXT:    movb $48, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r8), %zmm11
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r8), %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r8), %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r8), %zmm16
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r8), %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    movb $16, %al
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r9), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r9), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r9), %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r9), %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdx), %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdx), %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdx), %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdx), %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm8, 2944(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm6, 2752(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm6, 2560(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm5, 2368(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm5, 2176(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm4, 1984(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, 1600(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 1216(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 960(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 896(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm1, 832(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm1, 640(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 512(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512DQ-FAST-NEXT:    addq $3400, %rsp # imm = 0xD48
-; AVX512DQ-FAST-NEXT:    vzeroupper
-; AVX512DQ-FAST-NEXT:    retq
-;
-; AVX512BW-ONLY-SLOW-LABEL: store_i64_stride6_vf64:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [7,15,7,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    movb $12, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    movb $48, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    movb $16, %al
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm8, 2944(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm6, 2752(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm6, 2560(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm5, 2368(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm5, 2176(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm4, 1984(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm3, 1600(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm2, 1216(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 960(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 896(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm1, 832(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm1, 640(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 576(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 512(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 192(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    addq $3400, %rsp # imm = 0xD48
-; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512BW-ONLY-SLOW-NEXT:    retq
+; AVX512F-ONLY-LABEL: store_i64_stride6_vf64:
+; AVX512F-ONLY:       # %bb.0:
+; AVX512F-ONLY-NEXT:    subq $3400, %rsp # imm = 0xD48
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rdx), %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rdx), %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rdx), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rdx), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm30
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rcx), %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rcx), %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rcx), %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rcx), %zmm21
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
+; AVX512F-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
+; AVX512F-ONLY-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
+; AVX512F-ONLY-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
+; AVX512F-ONLY-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
+; AVX512F-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rsi), %zmm11
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
+; AVX512F-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rsi), %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rsi), %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm31
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rsi), %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm29
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm25
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
+; AVX512F-ONLY-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm16
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
+; AVX512F-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
+; AVX512F-ONLY-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [7,15,7,15]
+; AVX512F-ONLY-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm24
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm26
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm19
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm27
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm9
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm30
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    movb $12, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
+; AVX512F-ONLY-NEXT:    movb $48, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k2
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r8), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm9
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm11
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%r8), %zmm13
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%r8), %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%r8), %zmm16
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%r8), %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm24
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm2
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    movb $16, %al
+; AVX512F-ONLY-NEXT:    kmovw %eax, %k2
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm3
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm0
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa 320(%rdi), %ymm0
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
+; AVX512F-ONLY-NEXT:    vmovdqa 384(%rdi), %ymm0
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm0
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r9), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%r9), %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%r9), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%r9), %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%r9), %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa (%rdx), %xmm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rdx), %xmm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rdx), %xmm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rdx), %xmm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 256(%rdx), %xmm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 320(%rdx), %xmm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 384(%rdx), %xmm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 448(%rdx), %xmm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512F-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
+; AVX512F-ONLY-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
+; AVX512F-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm8, 2944(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
+; AVX512F-ONLY-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm6, 2752(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm6, 2560(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm5, 2368(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm5, 2176(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm4, 1984(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm3, 1600(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1408(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm2, 1216(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1024(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, 960(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, 896(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm1, 832(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, 704(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm1, 640(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm15, 576(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, 512(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm28, 192(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 768(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512F-ONLY-NEXT:    addq $3400, %rsp # imm = 0xD48
+; AVX512F-ONLY-NEXT:    vzeroupper
+; AVX512F-ONLY-NEXT:    retq
 ;
-; AVX512BW-ONLY-FAST-LABEL: store_i64_stride6_vf64:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [7,15,7,15]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    movb $12, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    movb $48, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r8), %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r8), %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    movb $16, %al
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r9), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r9), %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdx), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%rdx), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%rdx), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 448(%rdx), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm8, 2944(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm6, 2752(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm6, 2560(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm5, 2368(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm5, 2176(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm4, 1984(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm3, 1600(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm2, 1216(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 960(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 896(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm1, 832(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm1, 640(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 576(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 512(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 192(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    addq $3400, %rsp # imm = 0xD48
-; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
-; AVX512BW-ONLY-FAST-NEXT:    retq
+; AVX512DQ-ONLY-LABEL: store_i64_stride6_vf64:
+; AVX512DQ-ONLY:       # %bb.0:
+; AVX512DQ-ONLY-NEXT:    subq $3400, %rsp # imm = 0xD48
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rdx), %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rdx), %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rdx), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rdx), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm30
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rcx), %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rcx), %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rcx), %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rcx), %zmm21
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
+; AVX512DQ-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
+; AVX512DQ-ONLY-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
+; AVX512DQ-ONLY-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
+; AVX512DQ-ONLY-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
+; AVX512DQ-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rsi), %zmm11
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
+; AVX512DQ-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rsi), %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rsi), %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm31
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rsi), %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm29
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm25
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
+; AVX512DQ-ONLY-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
+; AVX512DQ-ONLY-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm23 = [7,15,7,15]
+; AVX512DQ-ONLY-NEXT:    # ymm23 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm19
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm27
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm30
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    movb $12, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
+; AVX512DQ-ONLY-NEXT:    movb $48, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r8), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm9
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm11
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%r8), %zmm13
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%r8), %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%r8), %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%r8), %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm2
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    movb $16, %al
+; AVX512DQ-ONLY-NEXT:    kmovw %eax, %k2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm0
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa 320(%rdi), %ymm0
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
+; AVX512DQ-ONLY-NEXT:    vmovdqa 384(%rdi), %ymm0
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm0
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r9), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%r9), %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%r9), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%r9), %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%r9), %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rdx), %xmm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rdx), %xmm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rdx), %xmm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rdx), %xmm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 256(%rdx), %xmm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 320(%rdx), %xmm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 384(%rdx), %xmm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 448(%rdx), %xmm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm8, 2944(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm6, 2752(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm6, 2560(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm5, 2368(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm5, 2176(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm4, 1984(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm3, 1600(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1408(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm2, 1216(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1024(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, 960(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, 896(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm1, 832(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, 704(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm1, 640(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm15, 576(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, 512(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm28, 192(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 768(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512DQ-ONLY-NEXT:    addq $3400, %rsp # imm = 0xD48
+; AVX512DQ-ONLY-NEXT:    vzeroupper
+; AVX512DQ-ONLY-NEXT:    retq
 ;
-; AVX512DQBW-SLOW-LABEL: store_i64_stride6_vf64:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
-; AVX512DQBW-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
-; AVX512DQBW-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
-; AVX512DQBW-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
-; AVX512DQBW-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
-; AVX512DQBW-SLOW-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
-; AVX512DQBW-SLOW-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
-; AVX512DQBW-SLOW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm23 = [7,15,7,15]
-; AVX512DQBW-SLOW-NEXT:    # ymm23 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    movb $12, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
-; AVX512DQBW-SLOW-NEXT:    movb $48, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm9
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    movb $16, %al
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdx), %xmm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdx), %xmm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdx), %xmm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 448(%rdx), %xmm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm8, 2944(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm6, 2752(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm6, 2560(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm5, 2368(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm5, 2176(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm4, 1984(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm3, 1600(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm2, 1216(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 960(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, 896(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm1, 832(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm1, 640(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm15, 576(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 512(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm28, 192(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512DQBW-SLOW-NEXT:    addq $3400, %rsp # imm = 0xD48
-; AVX512DQBW-SLOW-NEXT:    vzeroupper
-; AVX512DQBW-SLOW-NEXT:    retq
+; AVX512BW-ONLY-LABEL: store_i64_stride6_vf64:
+; AVX512BW-ONLY:       # %bb.0:
+; AVX512BW-ONLY-NEXT:    subq $3400, %rsp # imm = 0xD48
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rdx), %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdx), %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdx), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rdx), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rcx), %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rcx), %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rcx), %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rcx), %zmm21
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
+; AVX512BW-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
+; AVX512BW-ONLY-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
+; AVX512BW-ONLY-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
+; AVX512BW-ONLY-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
+; AVX512BW-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rsi), %zmm11
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
+; AVX512BW-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rsi), %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rsi), %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rsi), %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
+; AVX512BW-ONLY-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
+; AVX512BW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
+; AVX512BW-ONLY-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [7,15,7,15]
+; AVX512BW-ONLY-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm19
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm30
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    movb $12, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
+; AVX512BW-ONLY-NEXT:    movb $48, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r8), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm9
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%r8), %zmm13
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%r8), %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%r8), %zmm16
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%r8), %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm2
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    movb $16, %al
+; AVX512BW-ONLY-NEXT:    kmovd %eax, %k2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm3
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm0
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa 320(%rdi), %ymm0
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqa 384(%rdi), %ymm0
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm0
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r9), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%r9), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%r9), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%r9), %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%r9), %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdx), %xmm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rdx), %xmm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdx), %xmm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rdx), %xmm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 256(%rdx), %xmm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 320(%rdx), %xmm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 384(%rdx), %xmm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 448(%rdx), %xmm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
+; AVX512BW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm8, 2944(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm6, 2752(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm6, 2560(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm5, 2368(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm5, 2176(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm4, 1984(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm3, 1600(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1408(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm2, 1216(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1024(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, 960(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, 896(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm1, 832(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, 704(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm1, 640(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm15, 576(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, 512(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm28, 192(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 768(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512BW-ONLY-NEXT:    addq $3400, %rsp # imm = 0xD48
+; AVX512BW-ONLY-NEXT:    vzeroupper
+; AVX512BW-ONLY-NEXT:    retq
 ;
-; AVX512DQBW-FAST-LABEL: store_i64_stride6_vf64:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    subq $3400, %rsp # imm = 0xD48
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm21
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
-; AVX512DQBW-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
-; AVX512DQBW-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm4
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
-; AVX512DQBW-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
-; AVX512DQBW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm11
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
-; AVX512DQBW-FAST-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
-; AVX512DQBW-FAST-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
-; AVX512DQBW-FAST-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm23 = [7,15,7,15]
-; AVX512DQBW-FAST-NEXT:    # ymm23 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm30
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    movb $12, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
-; AVX512DQBW-FAST-NEXT:    movb $48, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm9
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r8), %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r8), %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r8), %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r8), %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r8), %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    movb $16, %al
-; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%rdi), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r9), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r9), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r9), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r9), %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdx), %xmm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdx), %xmm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdx), %xmm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdx), %xmm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%rdx), %xmm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%rdx), %xmm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 448(%rdx), %xmm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm8, 2944(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm6, 2752(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm6, 2560(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm5, 2368(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm5, 2176(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm4, 1984(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm3, 1600(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm2, 1216(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1024(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, 960(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, 896(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm1, 832(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, 704(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm1, 640(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, 576(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, 512(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm28, 192(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, (%rax)
-; AVX512DQBW-FAST-NEXT:    addq $3400, %rsp # imm = 0xD48
-; AVX512DQBW-FAST-NEXT:    vzeroupper
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512DQBW-ONLY-LABEL: store_i64_stride6_vf64:
+; AVX512DQBW-ONLY:       # %bb.0:
+; AVX512DQBW-ONLY-NEXT:    subq $3400, %rsp # imm = 0xD48
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rdx), %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdx), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdx), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rdx), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rcx), %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rcx), %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rcx), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rcx), %zmm21
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,12,4,12]
+; AVX512DQBW-ONLY-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm3, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm3, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm30, %zmm3, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm3, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm0, %zmm13, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,9,2,10,1,9,2,10]
+; AVX512DQBW-ONLY-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [5,13,6,14,5,13,6,14]
+; AVX512DQBW-ONLY-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm5, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm4, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [3,11,3,11,3,11,3,11]
+; AVX512DQBW-ONLY-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [7,15,7,15,7,15,7,15]
+; AVX512DQBW-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm3, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm3, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm3, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm30, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm30, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm30, %zmm14, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm30, %zmm3, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm14, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm14, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm3, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm14, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm21, %zmm6, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm21, %zmm3, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rsi), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm28 = [4,12,5,13,4,12,5,13]
+; AVX512DQBW-ONLY-NEXT:    # zmm28 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rsi), %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm28, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rsi), %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm15, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rsi), %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm28, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm22, %zmm7, %zmm28
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm12 = [2,10,2,10,2,10,2,10]
+; AVX512DQBW-ONLY-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm12, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-ONLY-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm10, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x4 {{.*#+}} zmm21 = [0,8,1,9,0,8,1,9]
+; AVX512DQBW-ONLY-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm23 = [7,15,7,15]
+; AVX512DQBW-ONLY-NEXT:    # ymm23 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm23, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm12, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm10, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm23, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm12, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm10, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm23, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm12, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm10, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm18, %zmm23, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm12, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm10, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm23, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm15, %zmm12, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm15, %zmm10, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm15, %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm15, %zmm23, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm12, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm10, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm23, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm11, %zmm4, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm23, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    movb $12, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm8 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm5 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm1 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm28 {%k1}
+; AVX512DQBW-ONLY-NEXT:    movb $48, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm13 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm24, %zmm22 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm14 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, %zmm20 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm16 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm19 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm24 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm26 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm17 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm27 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm15 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm15 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r8), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,9,u,4,5,6,7>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm9
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%r8), %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%r8), %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%r8), %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%r8), %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm15, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,1,13,u,4,5,6,7>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm20, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm7 = zmm7[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm2
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm6 = zmm6[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    movb $16, %al
+; AVX512DQBW-ONLY-NEXT:    kmovd %eax, %k2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm1 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <10,u,2,3,4,5,11,u>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm1, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <14,u,2,3,4,5,15,u>
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm2, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm6 = zmm25[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm0 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm1, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm2, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm3 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm3 = zmm29[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm4 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm1, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm4 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm4 = zmm31[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm0 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm1, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm13, %zmm2, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 320(%rdi), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm25 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm25 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm1, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm2, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 384(%rdi), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm20 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm1, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm2, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm1, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm1, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vshufi64x2 $228, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 64-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # zmm17 = zmm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm0 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm2, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm2, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k2}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r9), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,9,4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm10, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm10, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm10, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%r9), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm10, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%r9), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%r9), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%r9), %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,1,2,13,4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm10, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm10, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm10, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm10, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm10, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm10, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdx), %xmm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm12 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rdx), %xmm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm14 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdx), %xmm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm16 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rdx), %xmm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm30 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 256(%rdx), %xmm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm31 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 320(%rdx), %xmm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm1 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 384(%rdx), %xmm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm0 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 448(%rdx), %xmm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],mem[0]
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm0, %zmm21 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, (%r8), %zmm12, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,1,2,3,4,8,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 64(%r8), %zmm14, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm29, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 128(%r8), %zmm16, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm29, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 192(%r8), %zmm30, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm29, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 256(%r8), %zmm31, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm29, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 320(%r8), %zmm1, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 384(%r8), %zmm0, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm29, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $2, 448(%r8), %zmm21, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm29, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [0,10,2,3,4,5,6,11]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm29, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,2,3,4,12,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm28
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,14,2,3,4,5,6,15]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm31, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm29, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm31, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm29, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm30, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm31, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm29, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm30, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm31, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm29, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm30, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm31, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm29, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm30, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm31, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm29, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm30, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm31, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm29, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm30, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm31, %zmm17
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, 3008(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm8, 2944(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, 2880(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm18, 2816(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups (%rsp), %zmm6 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm6, 2752(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm20, 2624(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm6, 2560(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, 2496(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, 2432(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm5, 2368(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, 2240(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm5, 2176(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, 2112(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, 2048(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm4, 1984(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, 1856(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, 1728(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, 1664(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm3, 1600(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, 1472(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1408(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, 1344(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, 1280(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm2, 1216(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, 1088(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1024(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, 960(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, 896(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm1, 832(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, 704(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm1, 640(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm15, 576(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, 512(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm28, 192(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, 2688(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, 2304(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, 1920(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm24, 1536(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, 1152(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 768(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, (%rax)
+; AVX512DQBW-ONLY-NEXT:    addq $3400, %rsp # imm = 0xD48
+; AVX512DQBW-ONLY-NEXT:    vzeroupper
+; AVX512DQBW-ONLY-NEXT:    retq
   %in.vec0 = load <64 x i64>, ptr %in.vecptr0, align 64
   %in.vec1 = load <64 x i64>, ptr %in.vecptr1, align 64
   %in.vec2 = load <64 x i64>, ptr %in.vecptr2, align 64
@@ -14633,8 +10249,16 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
+; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY-FAST: {{.*}}
+; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
index 3d88b7b550dacd6..1e2a9d022f66c76 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
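
A minimal sketch of the pattern class these tests exercise (hypothetical function and pointer names, and a small stride-2/vf4 case rather than the vf64 bodies in this diff): two contiguous vectors are loaded, interleaved lane-by-lane with a shufflevector whose mask alternates source elements, and stored as one wide vector.

define void @store_i64_stride2_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.vec) nounwind {
  ; Load two <4 x i64> inputs, interleave their lanes, store the <8 x i64> result.
  %in.vec0 = load <4 x i64>, ptr %in.vecptr0, align 64
  %in.vec1 = load <4 x i64>, ptr %in.vecptr1, align 64
  %interleaved.vec = shufflevector <4 x i64> %in.vec0, <4 x i64> %in.vec1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  store <8 x i64> %interleaved.vec, ptr %out.vec, align 64
  ret void
}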
 
@@ -20323,8 +20323,12 @@ define void @store_i64_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll
index 293a3447577f765..6778ae0647ae3f0 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-8.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
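
The RUN lines above now pair each SLOW/FAST run with a shared feature-set prefix (AVX512F-ONLY, AVX512DQ-ONLY, AVX512BW-ONLY, AVX512DQBW-ONLY); where the slow and fast shuffle outputs are identical, the generated checks can collapse into a single block under the shared prefix, as in the AVX512DQBW-ONLY body earlier in this diff. An illustrative sketch with a hypothetical label:

; AVX512DQBW-ONLY-LABEL: example:
; AVX512DQBW-ONLY:       # %bb.0:
; AVX512DQBW-ONLY-NEXT:    vzeroupper
; AVX512DQBW-ONLY-NEXT:    retq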
 
@@ -8876,7909 +8876,3957 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-SLOW-LABEL: store_i64_stride8_vf64:
-; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $5512, %rsp # imm = 0x1588
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    movb $-64, %r11b
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %r11d, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1664(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1152(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 704(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 4032(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3968(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3904(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3520(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3456(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3392(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3008(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2944(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2880(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2496(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2432(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1984(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1344(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 960(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 896(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $5512, %rsp # imm = 0x1588
-; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512F-ONLY-SLOW-NEXT:    retq
+; AVX512F-ONLY-LABEL: store_i64_stride8_vf64:
+; AVX512F-ONLY:       # %bb.0:
+; AVX512F-ONLY-NEXT:    subq $5512, %rsp # imm = 0x1588
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm25
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r9), %zmm28
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%r10), %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%r10), %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqa64 (%rax), %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqa64 64(%rax), %zmm16
+; AVX512F-ONLY-NEXT:    movb $-64, %r11b
+; AVX512F-ONLY-NEXT:    kmovw %r11d, %k1
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
+; AVX512F-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
+; AVX512F-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
+; AVX512F-ONLY-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
+; AVX512F-ONLY-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
+; AVX512F-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm29
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
+; AVX512F-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
+; AVX512F-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
+; AVX512F-ONLY-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [7,15,7,15]
+; AVX512F-ONLY-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm15
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%r10), %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rax), %zmm14
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%r10), %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rax), %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm29
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%r10), %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rax), %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%r8), %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%r9), %zmm24
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rsi), %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rdx), %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 256(%rcx), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm6
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rsi), %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rdx), %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rcx), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rdx), %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rcx), %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rdx), %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rcx), %zmm6
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%r10), %zmm31
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rax), %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%r8), %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%r9), %zmm4
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm5
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
+; AVX512F-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rsi), %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%r10), %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rax), %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%r8), %zmm30
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%r9), %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%r10), %zmm16
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rax), %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rsi), %zmm1
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%r8), %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%r9), %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
+; AVX512F-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
+; AVX512F-ONLY-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
+; AVX512F-ONLY-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
+; AVX512F-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm25
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm13
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm26
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm29
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm24
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm27
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm21
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm28
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm20
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
+; AVX512F-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
+; AVX512F-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa (%rcx), %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-ONLY-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqa (%rdi), %ymm7
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rcx), %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rdx), %ymm1
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rsi), %ymm2
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rcx), %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rdx), %ymm1
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rsi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm7
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rcx), %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rdx), %ymm1
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rsi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm7
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 256(%rcx), %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 256(%rdx), %ymm1
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-ONLY-NEXT:    vmovdqa 256(%rsi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm7
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 320(%rcx), %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 320(%rdx), %ymm1
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-ONLY-NEXT:    vmovdqa 320(%rsi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rdi), %ymm23
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 384(%rcx), %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 384(%rdx), %ymm1
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-ONLY-NEXT:    vmovdqa 384(%rsi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rdi), %ymm18
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 448(%rcx), %ymm0
+; AVX512F-ONLY-NEXT:    vmovdqa 448(%rdx), %ymm1
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512F-ONLY-NEXT:    vmovdqa 448(%rsi), %ymm3
+; AVX512F-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm10
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
+; AVX512F-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512F-ONLY-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX512F-ONLY-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512F-ONLY-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rsi), %xmm1
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
+; AVX512F-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rsi), %xmm1
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
+; AVX512F-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm4
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rsi), %xmm1
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
+; AVX512F-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm12
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 256(%rsi), %xmm12
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
+; AVX512F-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm12
+; AVX512F-ONLY-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa 320(%rsi), %xmm13
+; AVX512F-ONLY-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
+; AVX512F-ONLY-NEXT:    vmovdqa64 320(%rdi), %xmm18
+; AVX512F-ONLY-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rsi), %xmm18
+; AVX512F-ONLY-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 384(%rdi), %xmm25
+; AVX512F-ONLY-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rsi), %xmm18
+; AVX512F-ONLY-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
+; AVX512F-ONLY-NEXT:    vmovdqa64 448(%rdi), %xmm25
+; AVX512F-ONLY-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
+; AVX512F-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
+; AVX512F-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
+; AVX512F-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
+; AVX512F-ONLY-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
+; AVX512F-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1664(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1216(%rax)
+; AVX512F-ONLY-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1152(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 640(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 192(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 4032(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 3968(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 3904(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 3840(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 3520(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 3456(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 3392(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 3328(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 3008(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 2944(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 2880(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 2816(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 2496(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 2432(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 2368(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 2304(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1984(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1920(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1856(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1472(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1408(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1344(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 1280(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 960(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 896(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 832(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 768(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm11, 576(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm10, 512(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512F-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512F-ONLY-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512F-ONLY-NEXT:    addq $5512, %rsp # imm = 0x1588
+; AVX512F-ONLY-NEXT:    vzeroupper
+; AVX512F-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-FAST-LABEL: store_i64_stride8_vf64:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $5512, %rsp # imm = 0x1588
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm16
-; AVX512F-ONLY-FAST-NEXT:    movb $-64, %r11b
-; AVX512F-ONLY-FAST-NEXT:    kmovw %r11d, %k1
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512F-ONLY-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512F-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512F-ONLY-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512F-ONLY-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512F-ONLY-FAST-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512F-ONLY-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512F-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
-; AVX512F-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1664(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1152(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 704(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 640(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 4032(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3968(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3904(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3520(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3456(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3392(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 3008(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 2944(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 2880(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 2496(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 2432(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1984(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1344(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 960(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 896(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $5512, %rsp # imm = 0x1588
-; AVX512F-ONLY-FAST-NEXT:    vzeroupper
-; AVX512F-ONLY-FAST-NEXT:    retq
+; AVX512DQ-ONLY-LABEL: store_i64_stride8_vf64:
+; AVX512DQ-ONLY:       # %bb.0:
+; AVX512DQ-ONLY-NEXT:    subq $5512, %rsp # imm = 0x1588
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm25
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r9), %zmm28
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%r10), %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%r10), %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 (%rax), %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 64(%rax), %zmm16
+; AVX512DQ-ONLY-NEXT:    movb $-64, %r11b
+; AVX512DQ-ONLY-NEXT:    kmovw %r11d, %k1
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
+; AVX512DQ-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
+; AVX512DQ-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
+; AVX512DQ-ONLY-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
+; AVX512DQ-ONLY-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
+; AVX512DQ-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm29
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
+; AVX512DQ-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
+; AVX512DQ-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
+; AVX512DQ-ONLY-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
+; AVX512DQ-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm30 = [7,15,7,15]
+; AVX512DQ-ONLY-NEXT:    # ymm30 = mem[0,1,0,1]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%r10), %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rax), %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%r10), %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rax), %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm29
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%r10), %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rax), %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%r8), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%r9), %zmm24
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rsi), %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rdx), %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 256(%rcx), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rsi), %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rdx), %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rcx), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rdx), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rcx), %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rdx), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rcx), %zmm6
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%r10), %zmm31
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rax), %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%r8), %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%r9), %zmm4
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rsi), %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%r10), %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rax), %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%r8), %zmm30
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%r9), %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%r10), %zmm16
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rax), %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rsi), %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%r8), %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%r9), %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
+; AVX512DQ-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
+; AVX512DQ-ONLY-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
+; AVX512DQ-ONLY-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
+; AVX512DQ-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm13
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm29
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm27
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm21
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm28
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm20
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
+; AVX512DQ-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
+; AVX512DQ-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rcx), %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rdi), %ymm7
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rcx), %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rdx), %ymm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rsi), %ymm2
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rcx), %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rdx), %ymm1
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rsi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm7
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rcx), %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rdx), %ymm1
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rsi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm7
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 256(%rcx), %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 256(%rdx), %ymm1
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 256(%rsi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm7
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 320(%rcx), %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 320(%rdx), %ymm1
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 320(%rsi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rdi), %ymm23
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 384(%rcx), %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 384(%rdx), %ymm1
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 384(%rsi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rdi), %ymm18
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 448(%rcx), %ymm0
+; AVX512DQ-ONLY-NEXT:    vmovdqa 448(%rdx), %ymm1
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQ-ONLY-NEXT:    vmovdqa 448(%rsi), %ymm3
+; AVX512DQ-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm10
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
+; AVX512DQ-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rsi), %xmm1
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rsi), %xmm1
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm4
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rsi), %xmm1
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
+; AVX512DQ-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm12
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 256(%rsi), %xmm12
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm12
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa 320(%rsi), %xmm13
+; AVX512DQ-ONLY-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 320(%rdi), %xmm18
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rsi), %xmm18
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 384(%rdi), %xmm25
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rsi), %xmm18
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 448(%rdi), %xmm25
+; AVX512DQ-ONLY-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
+; AVX512DQ-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
+; AVX512DQ-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
+; AVX512DQ-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
+; AVX512DQ-ONLY-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
+; AVX512DQ-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1664(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1216(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1152(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 640(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 192(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 4032(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 3968(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 3904(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 3840(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 3520(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 3456(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 3392(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 3328(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 3008(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 2944(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 2880(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 2816(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 2496(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 2432(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 2368(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 2304(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1984(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1920(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1856(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1472(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1408(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1344(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 1280(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 960(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 896(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 832(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 768(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm11, 576(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm10, 512(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-ONLY-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQ-ONLY-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512DQ-ONLY-NEXT:    addq $5512, %rsp # imm = 0x1588
+; AVX512DQ-ONLY-NEXT:    vzeroupper
+; AVX512DQ-ONLY-NEXT:    retq
 ;
-; AVX512DQ-SLOW-LABEL: store_i64_stride8_vf64:
-; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $5512, %rsp # imm = 0x1588
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm16
-; AVX512DQ-SLOW-NEXT:    movb $-64, %r11b
-; AVX512DQ-SLOW-NEXT:    kmovw %r11d, %k1
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512DQ-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512DQ-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512DQ-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512DQ-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512DQ-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512DQ-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512DQ-SLOW-NEXT:    # ymm30 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512DQ-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512DQ-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
-; AVX512DQ-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512DQ-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512DQ-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1664(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1152(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 704(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 640(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 4032(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3968(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3904(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3520(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3456(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3392(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 3008(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2944(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2880(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2496(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2432(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1984(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1344(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 960(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 896(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512DQ-SLOW-NEXT:    addq $5512, %rsp # imm = 0x1588
-; AVX512DQ-SLOW-NEXT:    vzeroupper
-; AVX512DQ-SLOW-NEXT:    retq
+; AVX512BW-ONLY-LABEL: store_i64_stride8_vf64:
+; AVX512BW-ONLY:       # %bb.0:
+; AVX512BW-ONLY-NEXT:    subq $5512, %rsp # imm = 0x1588
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm25
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r9), %zmm28
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%r10), %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%r10), %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 (%rax), %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqa64 64(%rax), %zmm16
+; AVX512BW-ONLY-NEXT:    movb $-64, %r11b
+; AVX512BW-ONLY-NEXT:    kmovd %r11d, %k1
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
+; AVX512BW-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
+; AVX512BW-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
+; AVX512BW-ONLY-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
+; AVX512BW-ONLY-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
+; AVX512BW-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
+; AVX512BW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
+; AVX512BW-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
+; AVX512BW-ONLY-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [7,15,7,15]
+; AVX512BW-ONLY-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%r10), %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rax), %zmm14
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%r10), %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rax), %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm29
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%r10), %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rax), %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%r8), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%r9), %zmm24
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rsi), %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rdx), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 256(%rcx), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm6
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rsi), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdx), %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rcx), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdx), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rcx), %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rdx), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rcx), %zmm6
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%r10), %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rax), %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%r8), %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%r9), %zmm4
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm5
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
+; AVX512BW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rsi), %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%r10), %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rax), %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%r8), %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%r9), %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%r10), %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rax), %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rsi), %zmm1
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%r8), %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%r9), %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
+; AVX512BW-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
+; AVX512BW-ONLY-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
+; AVX512BW-ONLY-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
+; AVX512BW-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm13
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm29
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm27
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm21
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm28
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm20
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
+; AVX512BW-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
+; AVX512BW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512BW-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rcx), %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdi), %ymm7
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rcx), %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rdx), %ymm1
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rsi), %ymm2
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rcx), %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdx), %ymm1
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rsi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm7
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rcx), %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rdx), %ymm1
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rsi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm7
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 256(%rcx), %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 256(%rdx), %ymm1
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-ONLY-NEXT:    vmovdqa 256(%rsi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm7
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 320(%rcx), %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 320(%rdx), %ymm1
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-ONLY-NEXT:    vmovdqa 320(%rsi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdi), %ymm23
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 384(%rcx), %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 384(%rdx), %ymm1
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-ONLY-NEXT:    vmovdqa 384(%rsi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdi), %ymm18
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 448(%rcx), %ymm0
+; AVX512BW-ONLY-NEXT:    vmovdqa 448(%rdx), %ymm1
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512BW-ONLY-NEXT:    vmovdqa 448(%rsi), %ymm3
+; AVX512BW-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm10
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
+; AVX512BW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX512BW-ONLY-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rsi), %xmm1
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
+; AVX512BW-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rsi), %xmm1
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
+; AVX512BW-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm4
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rsi), %xmm1
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
+; AVX512BW-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm12
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 256(%rsi), %xmm12
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
+; AVX512BW-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm12
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa 320(%rsi), %xmm13
+; AVX512BW-ONLY-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
+; AVX512BW-ONLY-NEXT:    vmovdqa64 320(%rdi), %xmm18
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rsi), %xmm18
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 384(%rdi), %xmm25
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rsi), %xmm18
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
+; AVX512BW-ONLY-NEXT:    vmovdqa64 448(%rdi), %xmm25
+; AVX512BW-ONLY-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
+; AVX512BW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
+; AVX512BW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
+; AVX512BW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
+; AVX512BW-ONLY-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
+; AVX512BW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1664(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1216(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1152(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 640(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 192(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 4032(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 3968(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 3904(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 3840(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 3520(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 3456(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 3392(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 3328(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 3008(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 2944(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 2880(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 2816(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 2496(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 2432(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 2368(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 2304(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1984(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1920(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1856(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1472(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1408(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1344(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 1280(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 960(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 896(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 832(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 768(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm11, 576(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm10, 512(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512BW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-ONLY-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-ONLY-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512BW-ONLY-NEXT:    addq $5512, %rsp # imm = 0x1588
+; AVX512BW-ONLY-NEXT:    vzeroupper
+; AVX512BW-ONLY-NEXT:    retq
 ;
-; AVX512DQ-FAST-LABEL: store_i64_stride8_vf64:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $5512, %rsp # imm = 0x1588
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rcx), %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r8), %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rax), %zmm16
-; AVX512DQ-FAST-NEXT:    movb $-64, %r11b
-; AVX512DQ-FAST-NEXT:    kmovw %r11d, %k1
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512DQ-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512DQ-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512DQ-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512DQ-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512DQ-FAST-NEXT:    # ymm30 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512DQ-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512DQ-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512DQ-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
-; AVX512DQ-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512DQ-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512DQ-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512DQ-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512DQ-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1664(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1152(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 704(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 640(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 4032(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3968(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3904(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3520(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3456(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3392(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 3008(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 2944(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 2880(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 2496(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 2432(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1984(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1344(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 960(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 896(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512DQ-FAST-NEXT:    addq $5512, %rsp # imm = 0x1588
-; AVX512DQ-FAST-NEXT:    vzeroupper
-; AVX512DQ-FAST-NEXT:    retq
-;
-; AVX512BW-ONLY-SLOW-LABEL: store_i64_stride8_vf64:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    subq $5512, %rsp # imm = 0x1588
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    movb $-64, %r11b
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %r11d, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
-; AVX512BW-ONLY-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1664(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1152(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 704(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 640(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 4032(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3968(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3904(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3520(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3456(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3392(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 3008(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2944(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2880(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2496(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2432(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1984(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1344(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 960(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 896(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    addq $5512, %rsp # imm = 0x1588
-; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
-; AVX512BW-ONLY-SLOW-NEXT:    retq
-;
-; AVX512BW-ONLY-FAST-LABEL: store_i64_stride8_vf64:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    subq $5512, %rsp # imm = 0x1588
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rcx), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r8), %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rax), %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    movb $-64, %r11b
-; AVX512BW-ONLY-FAST-NEXT:    kmovd %r11d, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
-; AVX512BW-ONLY-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-ONLY-FAST-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
-; AVX512BW-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1664(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1152(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 704(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 640(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 4032(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3968(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3904(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3520(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3456(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3392(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 3008(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 2944(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 2880(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 2496(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 2432(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1984(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1344(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 960(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 896(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    addq $5512, %rsp # imm = 0x1588
-; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-SLOW-LABEL: store_i64_stride8_vf64:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    subq $5512, %rsp # imm = 0x1588
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rcx), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r8), %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rax), %zmm16
-; AVX512DQBW-SLOW-NEXT:    movb $-64, %r11b
-; AVX512DQBW-SLOW-NEXT:    kmovd %r11d, %k1
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512DQBW-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512DQBW-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512DQBW-SLOW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512DQBW-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512DQBW-SLOW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti64x2 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512DQBW-SLOW-NEXT:    # ymm30 = mem[0,1,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rdx), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 256(%rcx), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-SLOW-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512DQBW-SLOW-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512DQBW-SLOW-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
-; AVX512DQBW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512DQBW-SLOW-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512DQBW-SLOW-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQBW-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
-; AVX512DQBW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512DQBW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1664(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1152(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 704(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 640(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 4032(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3968(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3904(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3520(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3456(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3392(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 3008(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2944(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2880(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2496(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2432(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1984(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1344(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 960(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 896(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512DQBW-SLOW-NEXT:    addq $5512, %rsp # imm = 0x1588
-; AVX512DQBW-SLOW-NEXT:    vzeroupper
-; AVX512DQBW-SLOW-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: store_i64_stride8_vf64:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    subq $5512, %rsp # imm = 0x1588
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rsi), %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rsi), %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdx), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rcx), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r8), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r8), %zmm25
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r8), %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r9), %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r9), %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r9), %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%r10), %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%r10), %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 (%rax), %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rax), %zmm16
-; AVX512DQBW-FAST-NEXT:    movb $-64, %r11b
-; AVX512DQBW-FAST-NEXT:    kmovd %r11d, %k1
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
-; AVX512DQBW-FAST-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
-; AVX512DQBW-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
-; AVX512DQBW-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
-; AVX512DQBW-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
-; AVX512DQBW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
-; AVX512DQBW-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
-; AVX512DQBW-FAST-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm29, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
-; AVX512DQBW-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm30 = [7,15,7,15]
-; AVX512DQBW-FAST-NEXT:    # ymm30 = mem[0,1,0,1]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%r10), %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rax), %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm25, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rdx), %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 128(%rcx), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r10), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rax), %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r8), %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%r9), %zmm29
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rsi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdx), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rcx), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r10), %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rax), %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r8), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%r9), %zmm24
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rsi), %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rdx), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 256(%rcx), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rsi), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdx), %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rcx), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm14, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdx), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rcx), %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdx), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rcx), %zmm6
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r10), %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rax), %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r8), %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%r9), %zmm4
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rsi), %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r10), %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rax), %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r8), %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%r9), %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r10), %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rax), %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdi), %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rsi), %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r8), %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%r9), %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
-; AVX512DQBW-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
-; AVX512DQBW-FAST-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
-; AVX512DQBW-FAST-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, %zmm1
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
-; AVX512DQBW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm27, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm29
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm2
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm27
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm21
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm28
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
-; AVX512DQBW-FAST-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
-; AVX512DQBW-FAST-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQBW-FAST-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rsi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rsi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rsi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%rdx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%rsi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %ymm23
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%rdx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%rsi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %ymm18
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 448(%rcx), %ymm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 448(%rdx), %ymm1
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 448(%rsi), %ymm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 448(%rdi), %ymm10
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
-; AVX512DQBW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rsi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rsi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rsi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa 192(%rdi), %xmm12
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rsi), %xmm12
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdi), %xmm12
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa 320(%rsi), %xmm13
-; AVX512DQBW-FAST-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %xmm18
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rsi), %xmm18
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 384(%rdi), %xmm25
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rsi), %xmm18
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 448(%rdi), %xmm25
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
-; AVX512DQBW-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1664(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1216(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1152(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 704(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 640(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 192(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 128(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 4032(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3968(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3904(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3840(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3520(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3456(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3392(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3328(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 3008(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 2944(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 2880(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 2816(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 2496(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 2432(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 2368(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 2304(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1984(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1920(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1856(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1792(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1472(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1408(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1344(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 1280(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 960(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 896(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 832(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 768(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm11, 576(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 448(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 320(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQBW-FAST-NEXT:    vmovaps %zmm0, 256(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512DQBW-FAST-NEXT:    addq $5512, %rsp # imm = 0x1588
-; AVX512DQBW-FAST-NEXT:    vzeroupper
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512DQBW-ONLY-LABEL: store_i64_stride8_vf64:
+; AVX512DQBW-ONLY:       # %bb.0:
+; AVX512DQBW-ONLY-NEXT:    subq $5512, %rsp # imm = 0x1588
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdi), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rsi), %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rsi), %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rsi), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rdx), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rdx), %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rcx), %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rcx), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r8), %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r8), %zmm25
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%r8), %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r9), %zmm28
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r9), %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%r9), %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%r10), %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%r10), %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 (%rax), %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 64(%rax), %zmm16
+; AVX512DQBW-ONLY-NEXT:    movb $-64, %r11b
+; AVX512DQBW-ONLY-NEXT:    kmovd %r11d, %k1
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [4,12,4,12,4,12,4,12]
+; AVX512DQBW-ONLY-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm12 = zmm1[0],zmm28[0],zmm1[2],zmm28[2],zmm1[4],zmm28[4],zmm1[6],zmm28[6]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [4,12,4,12]
+; AVX512DQBW-ONLY-NEXT:    # ymm6 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm6, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [5,13,5,13,5,13,5,13]
+; AVX512DQBW-ONLY-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm9, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm12 = zmm1[1],zmm28[1],zmm1[3],zmm28[3],zmm1[5],zmm28[5],zmm1[7],zmm28[7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm12 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm9, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [5,13,5,13]
+; AVX512DQBW-ONLY-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm7, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm12, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm13 = [6,14,6,14,6,14,6,14]
+; AVX512DQBW-ONLY-NEXT:    # zmm13 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm28, %zmm13, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} = zmm21[0],zmm27[0],zmm21[2],zmm27[2],zmm21[4],zmm27[4],zmm21[6],zmm27[6]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm13, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [6,14,6,14]
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm1, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm12, %zmm0, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [7,15,7,15,7,15,7,15]
+; AVX512DQBW-ONLY-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm29, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm28, %zmm18, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm12 {%k1} = zmm21[1],zmm27[1],zmm21[3],zmm27[3],zmm21[5],zmm27[5],zmm21[7],zmm27[7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm18, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti64x2 {{.*#+}} ymm30 = [7,15,7,15]
+; AVX512DQBW-ONLY-NEXT:    # ymm30 = mem[0,1,0,1]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm11, %zmm30, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm12, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm3, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm25[0],zmm26[0],zmm25[2],zmm26[2],zmm25[4],zmm26[4],zmm25[6],zmm26[6]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm3, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm6, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm16, %zmm9, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm10 = zmm25[1],zmm26[1],zmm25[3],zmm26[3],zmm25[5],zmm26[5],zmm25[7],zmm26[7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, %zmm10 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm9, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm7, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm13, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm1, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm13, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm14, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 {%k1} = zmm14[0],zmm16[0],zmm14[2],zmm16[2],zmm14[4],zmm16[4],zmm14[6],zmm16[6]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm8, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%r10), %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm19, %zmm18, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rax), %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm30, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm25, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm18, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 {%k1} = zmm11[1],zmm16[1],zmm11[3],zmm16[3],zmm11[5],zmm16[5],zmm11[7],zmm16[7]
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm3, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm5 = zmm23[0],zmm24[0],zmm23[2],zmm24[2],zmm23[4],zmm24[4],zmm23[6],zmm24[6]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm3, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rdx), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 128(%rcx), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm14, %zmm9, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm24[1],zmm23[3],zmm24[3],zmm23[5],zmm24[5],zmm23[7],zmm24[7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm9, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm13, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm14[0],zmm10[2],zmm14[2],zmm10[4],zmm14[4],zmm10[6],zmm14[6]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm17, %zmm18, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm14[1],zmm10[3],zmm14[3],zmm10[5],zmm14[5],zmm10[7],zmm14[7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%r10), %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rax), %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm11, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%r8), %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%r9), %zmm29
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm23[0],zmm29[0],zmm23[2],zmm29[2],zmm23[4],zmm29[4],zmm23[6],zmm29[6]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rsi), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rdx), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 192(%rcx), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm9, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm23[1],zmm29[1],zmm23[3],zmm29[3],zmm23[5],zmm29[5],zmm23[7],zmm29[7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm13, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm26[0],zmm10[2],zmm26[2],zmm10[4],zmm26[4],zmm10[6],zmm26[6]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm18, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm26[1],zmm10[3],zmm26[3],zmm10[5],zmm26[5],zmm10[7],zmm26[7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%r10), %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rax), %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm11, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%r8), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%r9), %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 = zmm0[0],zmm24[0],zmm0[2],zmm24[2],zmm0[4],zmm24[4],zmm0[6],zmm24[6]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rsi), %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm11, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rdx), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 256(%rcx), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm15, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm6, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm9, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm6 = zmm17[1],zmm24[1],zmm17[3],zmm24[3],zmm17[5],zmm24[5],zmm17[7],zmm24[7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm9, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm12, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm13, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm13, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm6 {%k1} = zmm10[0],zmm22[0],zmm10[2],zmm22[2],zmm10[4],zmm22[4],zmm10[6],zmm22[6]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm6, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm5, %zmm18, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm30, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm18, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 {%k1} = zmm10[1],zmm22[1],zmm10[3],zmm22[3],zmm10[5],zmm22[5],zmm10[7],zmm22[7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm3, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdi), %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rsi), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm11, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdx), %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rcx), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm15, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm12, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm14, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm1, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdx), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rcx), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm15, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm12, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm1, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rdx), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rcx), %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm6, %zmm0, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm6, %zmm30, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%r10), %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rax), %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm11, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%r8), %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%r9), %zmm4
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm10 = zmm17[0],zmm4[0],zmm17[2],zmm4[2],zmm17[4],zmm4[4],zmm17[6],zmm4[6]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm10 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm5, %zmm10, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm9, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 = zmm17[1],zmm4[1],zmm17[3],zmm4[3],zmm17[5],zmm4[5],zmm17[7],zmm4[7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm9, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm13, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm5 {%k1} = zmm31[0],zmm12[0],zmm31[2],zmm12[2],zmm31[4],zmm12[4],zmm31[6],zmm12[6]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm18, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm18, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm31[1],zmm12[1],zmm31[3],zmm12[3],zmm31[5],zmm12[5],zmm31[7],zmm12[7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdi), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rsi), %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm9, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm13, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm18, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%r10), %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rax), %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm2, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm9, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%r8), %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%r9), %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm13, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%r10), %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rax), %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rdi), %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rsi), %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm9, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%r8), %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%r9), %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm13, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm1, %zmm3, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm1, %zmm18, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm18, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm5
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm18, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm15 = [0,8,0,8,0,8,0,8]
+; AVX512DQBW-ONLY-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm15, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm14 = [1,9,1,9,1,9,1,9]
+; AVX512DQBW-ONLY-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm14, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm6 = [2,10,2,10,2,10,2,10]
+; AVX512DQBW-ONLY-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm6, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [3,11,3,11,3,11,3,11]
+; AVX512DQBW-ONLY-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm27, %zmm1, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm28, %zmm15, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm28, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm28, %zmm6, %zmm25
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm28, %zmm1, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm27 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm15, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm27, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm6, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm3, %zmm1, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm27, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm18
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm15, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm6, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm9, %zmm1, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm6, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm2, %zmm1, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm15, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm6, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm26, %zmm1, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm15, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm6, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm29, %zmm1, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm15, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm14, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm6, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm22, %zmm1, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm15, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm14, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm6, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm24, %zmm1, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm15, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm14, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm6, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm12, %zmm1, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm15, %zmm29
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm14, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm6, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm4, %zmm1, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm15, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm14, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm9 {%k1} = zmm11[0],zmm20[0],zmm11[2],zmm20[2],zmm11[4],zmm20[4],zmm11[6],zmm20[6]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm11[1],zmm20[1],zmm11[3],zmm20[3],zmm11[5],zmm20[5],zmm11[7],zmm20[7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm27
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm6, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm20, %zmm1, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm15, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm28
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm14, %zmm28
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm30, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm6, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm2 = zmm30[0],zmm0[0],zmm30[2],zmm0[2],zmm30[4],zmm0[4],zmm30[6],zmm0[6]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm30[1],zmm0[1],zmm30[3],zmm0[3],zmm30[5],zmm0[5],zmm30[7],zmm0[7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm0, %zmm1, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm15, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm14, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm4 {%k1} = zmm16[0],zmm8[0],zmm16[2],zmm8[2],zmm16[4],zmm8[4],zmm16[6],zmm8[6]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm5 {%k1} = zmm16[1],zmm8[1],zmm16[3],zmm8[3],zmm16[5],zmm8[5],zmm16[7],zmm8[7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm6, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm8, %zmm1, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm15
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm14
+; AVX512DQBW-ONLY-NEXT:    vpermi2q %zmm7, %zmm19, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} zmm8 = zmm19[0],zmm7[0],zmm19[2],zmm7[2],zmm19[4],zmm7[4],zmm19[6],zmm7[6]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm19[1],zmm7[1],zmm19[3],zmm7[3],zmm19[5],zmm7[5],zmm19[7],zmm7[7]
+; AVX512DQBW-ONLY-NEXT:    vpermt2q %zmm7, %zmm1, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, %zmm2 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm2, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm3, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm9, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm27, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm8 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm8, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm0, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vpblendd $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQBW-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm25 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rcx), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdi), %ymm7
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm25, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm18 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rcx), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rdx), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rsi), %ymm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm7 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm8[2,3],ymm7[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm7, %zmm18, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm5, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rcx), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdx), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rsi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm7
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm13, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rcx), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rdx), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rsi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm7
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm10, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm9
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 256(%rcx), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 256(%rdx), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 256(%rsi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm7
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm7[0],ymm3[0],ymm7[2],ymm3[2]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm26, %zmm26
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm2 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm7[1],ymm3[1],ymm7[3],ymm3[3]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm7
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, %zmm24 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 320(%rcx), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 320(%rdx), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 320(%rsi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdi), %ymm23
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm8 = ymm23[0],ymm3[0],ymm23[2],ymm3[2]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm8[2,3],ymm2[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm24, %zmm8
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, %zmm17 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm23[1],ymm3[1],ymm23[3],ymm3[3]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm17, %zmm17
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, %zmm20 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 384(%rcx), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 384(%rdx), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 384(%rsi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdi), %ymm18
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm18[0],ymm3[0],ymm18[2],ymm3[2]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm13[2,3],ymm2[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm20, %zmm20
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm30 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm18[1],ymm3[1],ymm18[3],ymm3[3]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm30, %zmm24
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm6 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 448(%rcx), %ymm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 448(%rdx), %ymm1
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 448(%rsi), %ymm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm10
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm10[0],ymm3[0],ymm10[2],ymm3[2]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm12[2,3],ymm2[2,3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm2, %zmm6, %zmm31
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm10[1],ymm3[1],ymm10[3],ymm3[3]
+; AVX512DQBW-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, %zmm19 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, (%rdx), %ymm2, %ymm2
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm3, %zmm4, %zmm3
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, %zmm10 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm2[1],ymm1[1],ymm2[3],ymm1[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm10, %zmm2
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm11 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rsi), %xmm1
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 64(%rcx), %ymm1, %ymm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 64(%rdx), %ymm4, %ymm4
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm10 = ymm4[0],ymm1[0],ymm4[2],ymm1[2]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm10, %zmm11, %zmm10
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, %zmm12 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm4[1],ymm1[1],ymm4[3],ymm1[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm12, %zmm11
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm13 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rsi), %xmm1
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 128(%rcx), %ymm1, %ymm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm4
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 128(%rdx), %ymm4, %ymm12
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm4, %zmm13, %zmm4
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm19
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rsi), %xmm1
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 192(%rcx), %ymm1, %ymm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm12
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 192(%rdx), %ymm12, %ymm12
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm13 = ymm12[0],ymm1[0],ymm12[2],ymm1[2]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm18, %zmm30
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm12[1],ymm1[1],ymm12[3],ymm1[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm1, %zmm18, %zmm1
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, %zmm5 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 256(%rsi), %xmm12
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 256(%rcx), %ymm12, %ymm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm12
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $1, 256(%rdx), %ymm12, %ymm18
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm12 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm12, %zmm5, %zmm12
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm5, %zmm23
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm29 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa 320(%rsi), %xmm13
+; AVX512DQBW-ONLY-NEXT:    vinserti128 $1, 320(%rcx), %ymm13, %ymm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 320(%rdi), %xmm18
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $1, 320(%rdx), %ymm18, %ymm18
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm18[0],ymm13[0],ymm18[2],ymm13[2]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm29, %zmm22
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm6 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm13 = ymm18[1],ymm13[1],ymm18[3],ymm13[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm13, %zmm6, %zmm13
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm21 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rsi), %xmm18
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $1, 384(%rcx), %ymm18, %ymm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 384(%rdi), %xmm25
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $1, 384(%rdx), %ymm25, %ymm25
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm21, %zmm16
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm28 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm18, %zmm28, %zmm21
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm15 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rsi), %xmm18
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $1, 448(%rcx), %ymm18, %ymm18
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 448(%rdi), %xmm25
+; AVX512DQBW-ONLY-NEXT:    vinserti32x4 $1, 448(%rdx), %ymm25, %ymm25
+; AVX512DQBW-ONLY-NEXT:    vpunpcklqdq {{.*#+}} ymm27 = ymm25[0],ymm18[0],ymm25[2],ymm18[2]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm27, %zmm15, %zmm6
+; AVX512DQBW-ONLY-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, %zmm14 {%k1}
+; AVX512DQBW-ONLY-NEXT:    vpunpckhqdq {{.*#+}} ymm18 = ymm25[1],ymm18[1],ymm25[3],ymm18[3]
+; AVX512DQBW-ONLY-NEXT:    vinserti64x4 $0, %ymm18, %zmm14, %zmm5
+; AVX512DQBW-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm0, 3776(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm31, 3712(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm24, 3264(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm20, 3200(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm17, 2752(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm8, 2688(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm7, 2240(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm26, 2176(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm9, 1728(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1664(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1216(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1152(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 704(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 640(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 192(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 128(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 4032(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 3968(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 3904(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 3840(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm5, 3648(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm6, 3584(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 3520(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 3456(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 3392(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 3328(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm21, 3136(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm16, 3072(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 3008(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 2944(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 2880(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 2816(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm13, 2624(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm22, 2560(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 2496(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 2432(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 2368(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 2304(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm23, 2112(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm12, 2048(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1984(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1920(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1856(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1792(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm1, 1600(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm30, 1536(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1472(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1408(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1344(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 1280(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm19, 1088(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm4, 1024(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 960(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 896(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 832(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 768(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm11, 576(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm10, 512(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 448(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 384(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 320(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQBW-ONLY-NEXT:    vmovaps %zmm0, 256(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQBW-ONLY-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512DQBW-ONLY-NEXT:    addq $5512, %rsp # imm = 0x1588
+; AVX512DQBW-ONLY-NEXT:    vzeroupper
+; AVX512DQBW-ONLY-NEXT:    retq
   %in.vec0 = load <64 x i64>, ptr %in.vecptr0, align 64
   %in.vec1 = load <64 x i64>, ptr %in.vecptr1, align 64
   %in.vec2 = load <64 x i64>, ptr %in.vecptr2, align 64
@@ -16808,8 +12856,16 @@ define void @store_i64_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
+; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY-FAST: {{.*}}
+; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-2.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-2.ll
index 4aa54baa1ee15ad..3fbaf5846cc3f1e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-2.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-2.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -358,14 +358,18 @@ define void @store_i8_stride2_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %out.ve
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}
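
(For context on why the new common prefixes pay off: FileCheck accepts a check block under any prefix passed via --check-prefixes, so when the SLOW and FAST runs of one target happen to generate identical code, update_llc_test_checks.py can emit a single merged block under the shared prefix instead of one copy per RUN line. A hypothetical reduced sketch — prefix names as in this patch, function body invented:

; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512F-ONLY,AVX512F-ONLY-SLOW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512F-ONLY,AVX512F-ONLY-FAST
;
; AVX512F-ONLY-LABEL: foo:
; AVX512F-ONLY:       vmovdqa64 (%rdi), %zmm0

If the two runs ever diverge for a function, the script falls back to separate AVX512F-ONLY-SLOW / AVX512F-ONLY-FAST blocks, which is why both the common and the per-run prefixes stay on the RUN lines.)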

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
index 035db822be5180b..8ccb4ef56d9c744 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -1116,14 +1116,18 @@ define void @store_i8_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
index 9e7d970a6abcde6..a4ae8f06f384957 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -730,14 +730,18 @@ define void @store_i8_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F-FAST: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; AVX512F-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 424365ce60d4a1a..f101f22c58b1f13 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -4779,10 +4779,14 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512: {{.*}}
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index c70b741fb29e857..493728470f30169 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -5315,10 +5315,14 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512: {{.*}}
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index cb65fe5fa68b0c0..bb74d0f8fe7ee14 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -9658,8 +9658,12 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512: {{.*}}
 ; AVX512-FAST: {{.*}}
 ; AVX512-SLOW: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
index 86843b6c204f425..257c9a2e4eaa507 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
@@ -4,14 +4,14 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12
 
 ; These patterns are produced by LoopVectorizer for interleaved stores.
 
@@ -6990,11 +6990,15 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1: {{.*}}
 ; AVX512-SLOW: {{.*}}
 ; AVX512BW: {{.*}}
+; AVX512BW-ONLY: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512DQ-ONLY: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-ONLY: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
 ; AVX512F: {{.*}}
+; AVX512F-ONLY: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}
