[llvm] [SDag] Notify listeners when deleting a node (PR #66991)

Sergei Barannikov via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 21 01:55:48 PDT 2023


https://github.com/s-barannikov created https://github.com/llvm/llvm-project/pull/66991

SelectionDAG::DeleteNode does not notify listeners about node deletion. As a result, SelectionDAG::Legalize may skip legalization of some nodes, resulting in a "Legalized selection DAG" that still contains illegal nodes. These nodes are eventually legalized during the subsequent DAG combining phase, but by then it may be too late for some patterns to match.

From 443d8709a9f54d3c7b9003a2ed2ac365b424fe2c Mon Sep 17 00:00:00 2001
From: Sergei Barannikov <barannikov88 at gmail.com>
Date: Thu, 21 Sep 2023 11:54:39 +0300
Subject: [PATCH] [SDag] Notify listeners when deleting a node

SelectionDAG::DeleteNode does not notify listeners about node deletion.
As a result, SelectionDAG::Legalize may skip legalization of some nodes,
resulting in a "Legalized selection DAG" that still contains illegal
nodes. These nodes are eventually legalized during the subsequent DAG
combining phase, but by then it may be too late for some patterns to
match.
---
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp |   3 +
 llvm/test/CodeGen/AMDGPU/half.ll              |  72 +--
 llvm/test/CodeGen/AMDGPU/load-constant-i1.ll  |  88 +--
 llvm/test/CodeGen/AMDGPU/load-constant-i16.ll | 133 ++---
 llvm/test/CodeGen/AMDGPU/load-constant-i8.ll  | 122 ++---
 llvm/test/CodeGen/AMDGPU/load-global-i16.ll   | 181 ++++---
 llvm/test/CodeGen/X86/pr40730.ll              |  20 +-
 .../vector-interleaved-load-i16-stride-5.ll   |  28 +-
 .../vector-interleaved-store-i16-stride-8.ll  | 504 +++++++++---------
 .../CodeGen/X86/vector-shuffle-256-v16.ll     |  56 +-
 .../zero_extend_vector_inreg_of_broadcast.ll  |  12 +-
 11 files changed, 606 insertions(+), 613 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 7fcd1f4f898911a..fd244f33e56c39b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1036,6 +1036,9 @@ void SelectionDAG::RemoveDeadNode(SDNode *N){
 }
 
 void SelectionDAG::DeleteNode(SDNode *N) {
+  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
+    DUL->NodeDeleted(N, nullptr);
+
   // First take this out of the appropriate CSE map.
   RemoveNodeFromCSEMaps(N);
 
diff --git a/llvm/test/CodeGen/AMDGPU/half.ll b/llvm/test/CodeGen/AMDGPU/half.ll
index e2d55990473c09c..e55c7bd3ac5e628 100644
--- a/llvm/test/CodeGen/AMDGPU/half.ll
+++ b/llvm/test/CodeGen/AMDGPU/half.ll
@@ -1969,19 +1969,19 @@ define amdgpu_kernel void @global_extload_v16f16_to_v16f64(ptr addrspace(1) %out
 ; VI-NEXT:    s_addc_u32 s3, s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v15, s3
 ; VI-NEXT:    v_mov_b32_e32 v14, s2
-; VI-NEXT:    s_add_u32 s2, s0, 0x50
+; VI-NEXT:    s_add_u32 s2, s0, 0x70
 ; VI-NEXT:    s_addc_u32 s3, s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v17, s3
 ; VI-NEXT:    v_mov_b32_e32 v16, s2
-; VI-NEXT:    s_add_u32 s2, s0, 64
+; VI-NEXT:    s_add_u32 s2, s0, 0x60
 ; VI-NEXT:    s_addc_u32 s3, s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v19, s3
 ; VI-NEXT:    v_mov_b32_e32 v11, s1
 ; VI-NEXT:    v_mov_b32_e32 v18, s2
-; VI-NEXT:    s_add_u32 s2, s0, 0x70
+; VI-NEXT:    s_add_u32 s2, s0, 0x50
 ; VI-NEXT:    v_mov_b32_e32 v10, s0
 ; VI-NEXT:    s_addc_u32 s3, s1, 0
-; VI-NEXT:    s_add_u32 s0, s0, 0x60
+; VI-NEXT:    s_add_u32 s0, s0, 64
 ; VI-NEXT:    s_addc_u32 s1, s1, 0
 ; VI-NEXT:    s_waitcnt vmcnt(1)
 ; VI-NEXT:    v_cvt_f32_f16_e32 v22, v4
@@ -1995,15 +1995,15 @@ define amdgpu_kernel void @global_extload_v16f16_to_v16f64(ptr addrspace(1) %out
 ; VI-NEXT:    v_cvt_f64_f32_e32 v[4:5], v4
 ; VI-NEXT:    v_cvt_f64_f32_e32 v[6:7], v7
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_cvt_f32_f16_e32 v26, v2
-; VI-NEXT:    v_cvt_f32_f16_sdwa v27, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; VI-NEXT:    v_cvt_f32_f16_sdwa v28, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NEXT:    v_cvt_f32_f16_e32 v26, v0
+; VI-NEXT:    v_cvt_f32_f16_sdwa v27, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NEXT:    v_cvt_f32_f16_sdwa v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; VI-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
-; VI-NEXT:    v_cvt_f32_f16_e32 v8, v3
-; VI-NEXT:    v_cvt_f32_f16_e32 v29, v0
-; VI-NEXT:    v_cvt_f32_f16_sdwa v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; VI-NEXT:    v_cvt_f32_f16_e32 v31, v1
-; VI-NEXT:    v_cvt_f32_f16_sdwa v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NEXT:    v_cvt_f32_f16_e32 v8, v1
+; VI-NEXT:    v_cvt_f32_f16_e32 v29, v2
+; VI-NEXT:    v_cvt_f32_f16_sdwa v30, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; VI-NEXT:    v_cvt_f32_f16_e32 v31, v3
+; VI-NEXT:    v_cvt_f32_f16_sdwa v32, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
 ; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v20
 ; VI-NEXT:    v_cvt_f64_f32_e32 v[2:3], v21
 ; VI-NEXT:    v_cvt_f64_f32_e32 v[4:5], v22
@@ -2040,40 +2040,40 @@ define amdgpu_kernel void @global_extload_v16f16_to_v16f64(ptr addrspace(1) %out
 ; GFX11-NEXT:    global_load_b128 v[0:3], v32, s[2:3]
 ; GFX11-NEXT:    global_load_b128 v[4:7], v32, s[2:3] offset:16
 ; GFX11-NEXT:    s_waitcnt vmcnt(1)
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v10, v1
-; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_lshrrev_b32_e32 v23, 16, v5
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v11, 16, v1
-; GFX11-NEXT:    v_lshrrev_b32_e32 v19, 16, v4
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v15, v7
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v23, v7
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v14, v6
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v22, v6
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v6, 16, v6
+; GFX11-NEXT:    v_lshrrev_b32_e32 v19, 16, v5
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v10, v1
+; GFX11-NEXT:    v_lshrrev_b32_e32 v15, 16, v4
 ; GFX11-NEXT:    v_cvt_f32_f16_e32 v13, v3
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
 ; GFX11-NEXT:    v_cvt_f32_f16_e32 v12, v2
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v18, v4
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v22, v5
-; GFX11-NEXT:    v_cvt_f64_f32_e32 v[4:5], v10
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v10, v23
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v34, v11
-; GFX11-NEXT:    v_cvt_f32_f16_e32 v11, v19
-; GFX11-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
 ; GFX11-NEXT:    v_cvt_f32_f16_e32 v7, v7
 ; GFX11-NEXT:    v_cvt_f32_f16_e32 v6, v6
+; GFX11-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v18, v5
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v34, v11
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v11, v19
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v14, v4
+; GFX11-NEXT:    v_cvt_f64_f32_e32 v[4:5], v10
+; GFX11-NEXT:    v_cvt_f32_f16_e32 v10, v15
 ; GFX11-NEXT:    v_cvt_f32_f16_e32 v8, v0
 ; GFX11-NEXT:    v_cvt_f32_f16_e32 v3, v3
 ; GFX11-NEXT:    v_cvt_f32_f16_e32 v2, v2
-; GFX11-NEXT:    v_cvt_f64_f32_e32 v[28:29], v22
-; GFX11-NEXT:    v_cvt_f64_f32_e32 v[30:31], v10
-; GFX11-NEXT:    v_cvt_f64_f32_e32 v[24:25], v18
-; GFX11-NEXT:    v_cvt_f64_f32_e32 v[26:27], v11
+; GFX11-NEXT:    v_cvt_f64_f32_e32 v[28:29], v23
+; GFX11-NEXT:    v_cvt_f64_f32_e32 v[30:31], v7
+; GFX11-NEXT:    v_cvt_f64_f32_e32 v[24:25], v22
+; GFX11-NEXT:    v_cvt_f64_f32_e32 v[26:27], v6
 ; GFX11-NEXT:    v_cvt_f32_f16_e32 v33, v9
-; GFX11-NEXT:    v_cvt_f64_f32_e32 v[20:21], v15
-; GFX11-NEXT:    v_cvt_f64_f32_e32 v[22:23], v7
+; GFX11-NEXT:    v_cvt_f64_f32_e32 v[20:21], v18
+; GFX11-NEXT:    v_cvt_f64_f32_e32 v[22:23], v11
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[16:17], v14
-; GFX11-NEXT:    v_cvt_f64_f32_e32 v[18:19], v6
+; GFX11-NEXT:    v_cvt_f64_f32_e32 v[18:19], v10
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[0:1], v8
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[8:9], v12
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[12:13], v13
@@ -2082,10 +2082,10 @@ define amdgpu_kernel void @global_extload_v16f16_to_v16f64(ptr addrspace(1) %out
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[6:7], v34
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[2:3], v33
 ; GFX11-NEXT:    s_clause 0x7
-; GFX11-NEXT:    global_store_b128 v32, v[28:31], s[0:1] offset:80
-; GFX11-NEXT:    global_store_b128 v32, v[24:27], s[0:1] offset:64
-; GFX11-NEXT:    global_store_b128 v32, v[20:23], s[0:1] offset:112
-; GFX11-NEXT:    global_store_b128 v32, v[16:19], s[0:1] offset:96
+; GFX11-NEXT:    global_store_b128 v32, v[28:31], s[0:1] offset:112
+; GFX11-NEXT:    global_store_b128 v32, v[24:27], s[0:1] offset:96
+; GFX11-NEXT:    global_store_b128 v32, v[20:23], s[0:1] offset:80
+; GFX11-NEXT:    global_store_b128 v32, v[16:19], s[0:1] offset:64
 ; GFX11-NEXT:    global_store_b128 v32, v[12:15], s[0:1] offset:48
 ; GFX11-NEXT:    global_store_b128 v32, v[8:11], s[0:1] offset:32
 ; GFX11-NEXT:    global_store_b128 v32, v[4:7], s[0:1] offset:16
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
index 438b1bfe319a04e..f433bd86023645b 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
@@ -4526,33 +4526,33 @@ define amdgpu_kernel void @constant_zextload_v16i1_to_v16i64(ptr addrspace(1) %o
 ; GFX6-NEXT:    s_mov_b32 s0, s4
 ; GFX6-NEXT:    s_mov_b32 s1, s5
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_bfe_u32 v2, v29, 11, 1
-; GFX6-NEXT:    v_bfe_u32 v0, v29, 10, 1
-; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
-; GFX6-NEXT:    v_bfe_u32 v5, v29, 9, 1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v2, 15, v29
+; GFX6-NEXT:    v_bfe_u32 v0, v29, 14, 1
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
+; GFX6-NEXT:    v_bfe_u32 v5, v29, 13, 1
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_bfe_u32 v3, v29, 8, 1
-; GFX6-NEXT:    buffer_store_dwordx4 v[3:6], off, s[0:3], 0 offset:64
-; GFX6-NEXT:    v_lshrrev_b32_e32 v8, 15, v29
+; GFX6-NEXT:    v_bfe_u32 v3, v29, 12, 1
+; GFX6-NEXT:    buffer_store_dwordx4 v[3:6], off, s[0:3], 0 offset:96
+; GFX6-NEXT:    v_bfe_u32 v8, v29, 11, 1
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_bfe_u32 v6, v29, 14, 1
-; GFX6-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GFX6-NEXT:    v_bfe_u32 v27, v29, 5, 1
-; GFX6-NEXT:    v_bfe_u32 v23, v29, 7, 1
-; GFX6-NEXT:    v_bfe_u32 v19, v29, 1, 1
-; GFX6-NEXT:    v_bfe_u32 v15, v29, 3, 1
-; GFX6-NEXT:    v_bfe_u32 v11, v29, 13, 1
-; GFX6-NEXT:    v_bfe_u32 v25, v29, 4, 1
-; GFX6-NEXT:    v_bfe_u32 v21, v29, 6, 1
-; GFX6-NEXT:    v_and_b32_e32 v17, 1, v29
-; GFX6-NEXT:    v_bfe_u32 v13, v29, 2, 1
+; GFX6-NEXT:    v_bfe_u32 v6, v29, 10, 1
+; GFX6-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:80
+; GFX6-NEXT:    v_bfe_u32 v27, v29, 1, 1
+; GFX6-NEXT:    v_bfe_u32 v23, v29, 3, 1
+; GFX6-NEXT:    v_bfe_u32 v19, v29, 5, 1
+; GFX6-NEXT:    v_bfe_u32 v15, v29, 7, 1
+; GFX6-NEXT:    v_bfe_u32 v11, v29, 9, 1
+; GFX6-NEXT:    v_and_b32_e32 v25, 1, v29
+; GFX6-NEXT:    v_bfe_u32 v21, v29, 2, 1
+; GFX6-NEXT:    v_bfe_u32 v17, v29, 4, 1
+; GFX6-NEXT:    v_bfe_u32 v13, v29, 6, 1
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_bfe_u32 v9, v29, 12, 1
-; GFX6-NEXT:    buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:96
-; GFX6-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:16
-; GFX6-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0
-; GFX6-NEXT:    buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:48
-; GFX6-NEXT:    buffer_store_dwordx4 v[25:28], off, s[0:3], 0 offset:32
+; GFX6-NEXT:    v_bfe_u32 v9, v29, 8, 1
+; GFX6-NEXT:    buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:64
+; GFX6-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:48
+; GFX6-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:32
+; GFX6-NEXT:    buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:16
+; GFX6-NEXT:    buffer_store_dwordx4 v[25:28], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: constant_zextload_v16i1_to_v16i64:
@@ -4561,21 +4561,20 @@ define amdgpu_kernel void @constant_zextload_v16i1_to_v16i64(ptr addrspace(1) %o
 ; GFX8-NEXT:    v_mov_b32_e32 v17, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v21, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v19, v17
-; GFX8-NEXT:    v_mov_b32_e32 v13, v17
+; GFX8-NEXT:    v_mov_b32_e32 v4, 1
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX8-NEXT:    flat_load_ushort v2, v[0:1]
 ; GFX8-NEXT:    s_add_u32 s2, s0, 0x70
 ; GFX8-NEXT:    s_addc_u32 s3, s1, 0
-; GFX8-NEXT:    s_add_u32 s4, s0, 0x50
+; GFX8-NEXT:    s_add_u32 s4, s0, 0x60
 ; GFX8-NEXT:    s_addc_u32 s5, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX8-NEXT:    v_mov_b32_e32 v24, s3
-; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    v_mov_b32_e32 v23, s2
-; GFX8-NEXT:    s_add_u32 s2, s0, 64
+; GFX8-NEXT:    s_add_u32 s2, s0, 0x50
 ; GFX8-NEXT:    s_addc_u32 s3, s1, 0
+; GFX8-NEXT:    v_mov_b32_e32 v13, v17
 ; GFX8-NEXT:    v_mov_b32_e32 v9, v17
 ; GFX8-NEXT:    v_mov_b32_e32 v5, v17
 ; GFX8-NEXT:    v_mov_b32_e32 v22, 0
@@ -4584,24 +4583,26 @@ define amdgpu_kernel void @constant_zextload_v16i1_to_v16i64(ptr addrspace(1) %o
 ; GFX8-NEXT:    v_mov_b32_e32 v7, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v11, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_lshrrev_b16_e32 v4, 10, v2
-; GFX8-NEXT:    v_and_b32_e32 v18, 1, v4
-; GFX8-NEXT:    v_lshrrev_b16_e32 v4, 11, v2
-; GFX8-NEXT:    v_and_b32_e32 v4, 1, v4
-; GFX8-NEXT:    v_and_b32_e32 v20, 0xffff, v4
-; GFX8-NEXT:    v_lshrrev_b16_e32 v4, 14, v2
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 13, v2
+; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 12, v2
+; GFX8-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT:    v_and_b32_e32 v18, 1, v1
+; GFX8-NEXT:    v_and_b32_e32 v20, 0xffff, v0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[18:21]
-; GFX8-NEXT:    v_mov_b32_e32 v0, 1
-; GFX8-NEXT:    v_and_b32_e32 v16, 1, v4
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 14, v2
+; GFX8-NEXT:    v_and_b32_e32 v16, 1, v0
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v18, 15, v2
 ; GFX8-NEXT:    flat_store_dwordx4 v[23:24], v[16:19]
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 11, v2
 ; GFX8-NEXT:    v_mov_b32_e32 v24, s3
-; GFX8-NEXT:    v_and_b32_sdwa v16, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 9, v2
-; GFX8-NEXT:    v_mov_b32_e32 v23, s2
+; GFX8-NEXT:    v_lshrrev_b16_e32 v6, 10, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX8-NEXT:    s_add_u32 s2, s0, 0x60
+; GFX8-NEXT:    v_mov_b32_e32 v23, s2
+; GFX8-NEXT:    s_add_u32 s2, s0, 64
 ; GFX8-NEXT:    v_mov_b32_e32 v19, 0
+; GFX8-NEXT:    v_and_b32_e32 v16, 1, v6
 ; GFX8-NEXT:    v_and_b32_e32 v18, 0xffff, v0
 ; GFX8-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX8-NEXT:    flat_store_dwordx4 v[23:24], v[16:19]
@@ -4610,17 +4611,16 @@ define amdgpu_kernel void @constant_zextload_v16i1_to_v16i64(ptr addrspace(1) %o
 ; GFX8-NEXT:    s_add_u32 s2, s0, 48
 ; GFX8-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v26, s3
-; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 12, v2
 ; GFX8-NEXT:    v_mov_b32_e32 v25, s2
 ; GFX8-NEXT:    s_add_u32 s2, s0, 32
-; GFX8-NEXT:    v_and_b32_e32 v19, 1, v0
-; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 13, v2
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 9, v2
 ; GFX8-NEXT:    v_mov_b32_e32 v20, v17
 ; GFX8-NEXT:    v_mov_b32_e32 v1, v17
 ; GFX8-NEXT:    v_mov_b32_e32 v17, s1
 ; GFX8-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX8-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v16, s0
+; GFX8-NEXT:    v_and_b32_sdwa v19, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_e32 v21, 0xffff, v0
 ; GFX8-NEXT:    s_add_u32 s0, s0, 16
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v6, 7, v2
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
index bee3d455187ca78..ace5eafc9aa8478 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -5650,27 +5650,27 @@ define amdgpu_kernel void @constant_zextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s1, 16
 ; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s13, s3, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s7, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s15, s5, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s16, s4, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s17, s6, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s15, s7, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s16, s6, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s17, s4, 16
 ; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s18, s2, 16
 ; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s19, s0, 16
 ; GCN-NOHSA-SI-NEXT:    s_and_b32 s0, s0, 0xffff
 ; GCN-NOHSA-SI-NEXT:    s_and_b32 s2, s2, 0xffff
-; GCN-NOHSA-SI-NEXT:    s_and_b32 s6, s6, 0xffff
 ; GCN-NOHSA-SI-NEXT:    s_and_b32 s4, s4, 0xffff
+; GCN-NOHSA-SI-NEXT:    s_and_b32 s6, s6, 0xffff
 ; GCN-NOHSA-SI-NEXT:    s_and_b32 s1, s1, 0xffff
 ; GCN-NOHSA-SI-NEXT:    s_and_b32 s3, s3, 0xffff
-; GCN-NOHSA-SI-NEXT:    s_and_b32 s5, s5, 0xffff
 ; GCN-NOHSA-SI-NEXT:    s_and_b32 s7, s7, 0xffff
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NOHSA-SI-NEXT:    s_and_b32 s5, s5, 0xffff
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s7
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s15
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s5
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s14
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:112
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:80
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s13
@@ -5680,13 +5680,13 @@ define amdgpu_kernel void @constant_zextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s12
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:16
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s16
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:64
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:96
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s17
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:96
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:64
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s18
@@ -5707,33 +5707,33 @@ define amdgpu_kernel void @constant_zextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-HSA-NEXT:    s_lshr_b32 s12, s5, 16
 ; GCN-HSA-NEXT:    s_lshr_b32 s13, s7, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s14, s11, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s2, s9, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s15, s8, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s16, s10, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s14, s9, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s2, s11, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s15, s10, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s16, s8, 16
 ; GCN-HSA-NEXT:    s_lshr_b32 s17, s6, 16
 ; GCN-HSA-NEXT:    s_lshr_b32 s18, s4, 16
 ; GCN-HSA-NEXT:    s_and_b32 s4, s4, 0xffff
 ; GCN-HSA-NEXT:    s_and_b32 s6, s6, 0xffff
-; GCN-HSA-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GCN-HSA-NEXT:    s_and_b32 s8, s8, 0xffff
+; GCN-HSA-NEXT:    s_and_b32 s10, s10, 0xffff
 ; GCN-HSA-NEXT:    s_and_b32 s5, s5, 0xffff
 ; GCN-HSA-NEXT:    s_and_b32 s7, s7, 0xffff
-; GCN-HSA-NEXT:    s_and_b32 s11, s11, 0xffff
-; GCN-HSA-NEXT:    s_and_b32 s3, s9, 0xffff
+; GCN-HSA-NEXT:    s_and_b32 s9, s9, 0xffff
+; GCN-HSA-NEXT:    s_and_b32 s3, s11, 0xffff
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x50
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x70
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x70
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x50
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s9
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s14
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
@@ -5746,22 +5746,22 @@ define amdgpu_kernel void @constant_zextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 64
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s5
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s12
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 64
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s10
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s15
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s8
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s16
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
@@ -5793,41 +5793,41 @@ define amdgpu_kernel void @constant_zextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-NOHSA-VI-NEXT:    s_and_b32 s6, s6, 0xffff
 ; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s15, s7, 16
 ; GCN-NOHSA-VI-NEXT:    s_and_b32 s7, s7, 0xffff
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s16, s10, 16
-; GCN-NOHSA-VI-NEXT:    s_and_b32 s10, s10, 0xffff
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s17, s11, 16
-; GCN-NOHSA-VI-NEXT:    s_and_b32 s11, s11, 0xffff
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s18, s8, 16
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s16, s8, 16
 ; GCN-NOHSA-VI-NEXT:    s_and_b32 s8, s8, 0xffff
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s2, s9, 16
-; GCN-NOHSA-VI-NEXT:    s_and_b32 s3, s9, 0xffff
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s17, s9, 16
+; GCN-NOHSA-VI-NEXT:    s_and_b32 s9, s9, 0xffff
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s18, s10, 16
+; GCN-NOHSA-VI-NEXT:    s_and_b32 s10, s10, 0xffff
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s2, s11, 16
+; GCN-NOHSA-VI-NEXT:    s_and_b32 s3, s11, 0xffff
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s2
-; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 0x50
+; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 0x70
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 64
+; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 0x60
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 0x70
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 0x50
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s10
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s18
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 0x60
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 64
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s9
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s17
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-NOHSA-VI-NEXT:    s_add_u32 s2, s0, 48
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s8
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s16
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
@@ -6141,57 +6141,58 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s18, s1, 16
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[16:17], s[2:3], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s2, s2, 16
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[22:23], s[6:7], 0x100000
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s6, s6, 16
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[28:29], s[4:5], 0x100000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[22:23], s[4:5], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s4, s4, 16
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[28:29], s[6:7], 0x100000
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s6, s6, 16
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[14:15], s[12:13], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[12:13], s[18:19], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[18:19], s[2:3], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s2, s3
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[24:25], s[6:7], 0x100000
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s6, s7
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[30:31], s[4:5], 0x100000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[24:25], s[4:5], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s4, s5
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[30:31], s[6:7], 0x100000
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s6, s7
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[10:11], s[0:1], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s0, s0, 16
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[20:21], s[2:3], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s2, s3, 16
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[26:27], s[6:7], 0x100000
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s6, s7, 16
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[34:35], s[4:5], 0x100000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[26:27], s[4:5], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s4, s5, 16
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[34:35], s[6:7], 0x100000
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s6, s7, 16
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[0:1], s[0:1], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x100000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x100000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x100000
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NOHSA-VI-NEXT:    s_add_u32 s4, s8, 0x50
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NOHSA-VI-NEXT:    s_addc_u32 s5, s9, 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x100000
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-NOHSA-VI-NEXT:    s_add_u32 s6, s8, 0x70
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-NOHSA-VI-NEXT:    s_addc_u32 s7, s9, 0
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s6
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s34
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s35
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s5
-; GCN-NOHSA-VI-NEXT:    s_add_u32 s4, s8, 64
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s7
+; GCN-NOHSA-VI-NEXT:    s_add_u32 s6, s8, 0x60
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-NOHSA-VI-NEXT:    s_addc_u32 s5, s9, 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-NOHSA-VI-NEXT:    s_addc_u32 s7, s9, 0
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s6
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s28
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s29
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s30
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s31
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s5
-; GCN-NOHSA-VI-NEXT:    s_add_u32 s4, s8, 0x70
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s7
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-NOHSA-VI-NEXT:    s_nop 0
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NOHSA-VI-NEXT:    s_add_u32 s4, s8, 0x50
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s5
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s5, s9, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s4
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s26
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s27
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s6
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s7
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s5
-; GCN-NOHSA-VI-NEXT:    s_add_u32 s4, s8, 0x60
+; GCN-NOHSA-VI-NEXT:    s_add_u32 s4, s8, 64
 ; GCN-NOHSA-VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-NOHSA-VI-NEXT:    s_addc_u32 s5, s9, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s4
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
index 66fc322e5e04b57..87c6885c6d4767c 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -5523,53 +5523,53 @@ define amdgpu_kernel void @constant_zextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX6-NOHSA-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, v1
 ; GFX6-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s8, s5, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s9, s4, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s10, s7, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s11, s6, 24
-; GFX6-NOHSA-NEXT:    s_bfe_u32 s12, s6, 0x80008
-; GFX6-NOHSA-NEXT:    s_bfe_u32 s13, s7, 0x80008
-; GFX6-NOHSA-NEXT:    s_bfe_u32 s14, s4, 0x80008
-; GFX6-NOHSA-NEXT:    s_bfe_u32 s15, s5, 0x80008
-; GFX6-NOHSA-NEXT:    s_and_b32 s16, s5, 0xff
-; GFX6-NOHSA-NEXT:    s_and_b32 s17, s4, 0xff
-; GFX6-NOHSA-NEXT:    s_and_b32 s18, s7, 0xff
-; GFX6-NOHSA-NEXT:    s_and_b32 s19, s6, 0xff
-; GFX6-NOHSA-NEXT:    s_bfe_u32 s5, s5, 0x80010
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s8, s4, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s9, s5, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s10, s6, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s11, s7, 24
+; GFX6-NOHSA-NEXT:    s_bfe_u32 s12, s7, 0x80008
+; GFX6-NOHSA-NEXT:    s_bfe_u32 s13, s6, 0x80008
+; GFX6-NOHSA-NEXT:    s_bfe_u32 s14, s5, 0x80008
+; GFX6-NOHSA-NEXT:    s_bfe_u32 s15, s4, 0x80008
+; GFX6-NOHSA-NEXT:    s_and_b32 s16, s4, 0xff
+; GFX6-NOHSA-NEXT:    s_and_b32 s17, s5, 0xff
+; GFX6-NOHSA-NEXT:    s_and_b32 s18, s6, 0xff
+; GFX6-NOHSA-NEXT:    s_and_b32 s19, s7, 0xff
 ; GFX6-NOHSA-NEXT:    s_bfe_u32 s4, s4, 0x80010
-; GFX6-NOHSA-NEXT:    s_bfe_u32 s6, s6, 0x80010
+; GFX6-NOHSA-NEXT:    s_bfe_u32 s5, s5, 0x80010
 ; GFX6-NOHSA-NEXT:    s_bfe_u32 s7, s7, 0x80010
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s6
+; GFX6-NOHSA-NEXT:    s_bfe_u32 s6, s6, 0x80010
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s7
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s11
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s7
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s10
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s9
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s5
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s8
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s19
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s12
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s18
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s13
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s17
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s14
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s16
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s15
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX6-NOHSA-NEXT:    s_endpgm
 ;
 ; GFX7-HSA-LABEL: constant_zextload_v16i8_to_v16i64:
@@ -5580,70 +5580,70 @@ define amdgpu_kernel void @constant_zextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX7-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7-HSA-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
 ; GFX7-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-HSA-NEXT:    s_lshr_b32 s8, s5, 24
-; GFX7-HSA-NEXT:    s_lshr_b32 s9, s4, 24
-; GFX7-HSA-NEXT:    s_lshr_b32 s10, s7, 24
-; GFX7-HSA-NEXT:    s_lshr_b32 s2, s6, 24
-; GFX7-HSA-NEXT:    s_bfe_u32 s11, s6, 0x80008
-; GFX7-HSA-NEXT:    s_bfe_u32 s12, s7, 0x80008
-; GFX7-HSA-NEXT:    s_bfe_u32 s13, s4, 0x80008
-; GFX7-HSA-NEXT:    s_bfe_u32 s14, s5, 0x80008
-; GFX7-HSA-NEXT:    s_and_b32 s15, s5, 0xff
-; GFX7-HSA-NEXT:    s_and_b32 s16, s4, 0xff
-; GFX7-HSA-NEXT:    s_and_b32 s17, s7, 0xff
-; GFX7-HSA-NEXT:    s_and_b32 s18, s6, 0xff
-; GFX7-HSA-NEXT:    s_bfe_u32 s5, s5, 0x80010
+; GFX7-HSA-NEXT:    s_lshr_b32 s8, s4, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s9, s5, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s10, s6, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s2, s7, 24
+; GFX7-HSA-NEXT:    s_bfe_u32 s11, s7, 0x80008
+; GFX7-HSA-NEXT:    s_bfe_u32 s12, s6, 0x80008
+; GFX7-HSA-NEXT:    s_bfe_u32 s13, s5, 0x80008
+; GFX7-HSA-NEXT:    s_bfe_u32 s14, s4, 0x80008
+; GFX7-HSA-NEXT:    s_and_b32 s15, s4, 0xff
+; GFX7-HSA-NEXT:    s_and_b32 s16, s5, 0xff
+; GFX7-HSA-NEXT:    s_and_b32 s17, s6, 0xff
+; GFX7-HSA-NEXT:    s_and_b32 s18, s7, 0xff
 ; GFX7-HSA-NEXT:    s_bfe_u32 s4, s4, 0x80010
-; GFX7-HSA-NEXT:    s_bfe_u32 s7, s7, 0x80010
-; GFX7-HSA-NEXT:    s_bfe_u32 s3, s6, 0x80010
+; GFX7-HSA-NEXT:    s_bfe_u32 s5, s5, 0x80010
+; GFX7-HSA-NEXT:    s_bfe_u32 s6, s6, 0x80010
+; GFX7-HSA-NEXT:    s_bfe_u32 s3, s7, 0x80010
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s2
-; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 0x50
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 0x70
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 0x70
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 0x50
 ; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 16
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s7
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 48
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 48
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 16
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s9
 ; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 64
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s5
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 0x60
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s8
 ; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 0x60
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 64
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s18
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s11
 ; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 32
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s17
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s12
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
+; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s0
-; GFX7-HSA-NEXT:    s_add_u32 s0, s0, 32
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s16
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s13
-; GFX7-HSA-NEXT:    s_addc_u32 s1, s1, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s15
@@ -5668,9 +5668,9 @@ define amdgpu_kernel void @constant_zextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v6, 8, s4
 ; GFX8-NOHSA-NEXT:    s_and_b32 s12, s5, 0xff
 ; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v7, 8, s5
-; GFX8-NOHSA-NEXT:    s_and_b32 s13, s7, 0xff
-; GFX8-NOHSA-NEXT:    s_and_b32 s14, s6, 0xff
-; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v9, 8, s6
+; GFX8-NOHSA-NEXT:    s_and_b32 s13, s6, 0xff
+; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v8, 8, s6
+; GFX8-NOHSA-NEXT:    s_and_b32 s14, s7, 0xff
 ; GFX8-NOHSA-NEXT:    s_bfe_u32 s4, s4, 0x80010
 ; GFX8-NOHSA-NEXT:    s_bfe_u32 s6, s6, 0x80010
 ; GFX8-NOHSA-NEXT:    s_bfe_u32 s5, s5, 0x80010
@@ -5700,20 +5700,20 @@ define amdgpu_kernel void @constant_zextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX8-NOHSA-NEXT:    s_add_u32 s2, s0, 64
+; GFX8-NOHSA-NEXT:    s_add_u32 s2, s0, 0x60
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
+; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v9, 8, s7
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX8-NOHSA-NEXT:    s_add_u32 s2, s0, 0x60
+; GFX8-NOHSA-NEXT:    s_add_u32 s2, s0, 64
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s14
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, v9
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
-; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v8, 8, s7
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GFX8-NOHSA-NEXT:    s_add_u32 s2, s0, 32
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s13
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index e89c44d5b94a899..aeb6a42d8bb216c 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -6481,44 +6481,43 @@ define amdgpu_kernel void @global_zextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v13, 16, v1
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v17, 16, v3
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v5
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v7
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v21, 16, v2
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v25, 16, v0
 ; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v23, 0xffff, v0
 ; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v19, 0xffff, v2
 ; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v11, 0xffff, v1
 ; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v15, 0xffff, v3
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v6
-; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v0, 0xffff, v6
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v6, 16, v4
-; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v4, 0xffff, v4
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v29, 16, v7
-; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v27, 0xffff, v7
-; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v7, 0xffff, v5
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v4
+; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v0, 0xffff, v4
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v29, 16, v6
+; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v27, 0xffff, v6
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v33, 16, v5
+; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v31, 0xffff, v5
+; GCN-NOHSA-SI-NEXT:    v_and_b32_e32 v7, 0xffff, v7
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, v8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v28, v8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v30, v8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v32, v8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v34, v8
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, v8
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v18, v8
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, v8
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, v8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, v8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v28, v8
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, v8
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v20, v8
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v24, v8
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[7:10], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[7:10], off, s[0:3], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v26, 0
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, 0
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[27:30], off, s[0:3], 0 offset:112
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v30, 0
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[31:34], off, s[0:3], 0 offset:80
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[27:30], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[19:22], off, s[0:3], 0 offset:32
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
@@ -6545,7 +6544,7 @@ define amdgpu_kernel void @global_zextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    s_addc_u32 s5, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s5
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s4
-; GCN-HSA-NEXT:    s_add_u32 s4, s0, 0x50
+; GCN-HSA-NEXT:    s_add_u32 s4, s0, 0x70
 ; GCN-HSA-NEXT:    s_addc_u32 s5, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s2
@@ -6558,23 +6557,23 @@ define amdgpu_kernel void @global_zextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[9:10], v[16:19]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s5
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s4
-; GCN-HSA-NEXT:    s_add_u32 s4, s0, 0x70
+; GCN-HSA-NEXT:    s_add_u32 s4, s0, 0x50
 ; GCN-HSA-NEXT:    s_addc_u32 s5, s1, 0
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s3
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(1)
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v5
-; GCN-HSA-NEXT:    v_and_b32_e32 v16, 0xffff, v5
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v7
+; GCN-HSA-NEXT:    v_and_b32_e32 v16, 0xffff, v7
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 64
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[9:10], v[16:19]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s5
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s4
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v7
-; GCN-HSA-NEXT:    v_and_b32_e32 v16, 0xffff, v7
-; GCN-HSA-NEXT:    s_add_u32 s0, s0, 0x60
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v5
+; GCN-HSA-NEXT:    v_and_b32_e32 v16, 0xffff, v5
+; GCN-HSA-NEXT:    s_add_u32 s0, s0, 64
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[9:10], v[16:19]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v10, v8
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v3
@@ -6585,10 +6584,10 @@ define amdgpu_kernel void @global_zextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v3, v8
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v13, v8
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v9, v8
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v14, 16, v4
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v10, 16, v6
-; GCN-HSA-NEXT:    v_and_b32_e32 v8, 0xffff, v6
-; GCN-HSA-NEXT:    v_and_b32_e32 v12, 0xffff, v4
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v14, 16, v6
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v10, 16, v4
+; GCN-HSA-NEXT:    v_and_b32_e32 v8, 0xffff, v4
+; GCN-HSA-NEXT:    v_and_b32_e32 v12, 0xffff, v6
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s1
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v4, 16, v2
@@ -6616,10 +6615,10 @@ define amdgpu_kernel void @global_zextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
 ; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v30, 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v28, 0
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s1, s5
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v31, 0
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v29, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v15, 0
@@ -6628,34 +6627,34 @@ define amdgpu_kernel void @global_zextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v17, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v23, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v21, 0
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v27, 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v25, 0
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(1)
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v10, 16, v0
-; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v22, 16, v6
-; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v20, 0xffff, v6
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v26, 16, v7
-; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v24, 0xffff, v7
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v6, 16, v4
-; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v4, 0xffff, v4
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v29, 16, v5
-; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v27, 0xffff, v5
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v8, 0xffff, v0
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v14, 16, v1
-; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v12, 0xffff, v1
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v18, 16, v2
 ; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v16, 0xffff, v2
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v3
 ; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v0, 0xffff, v3
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[27:30], off, s[0:3], 0 offset:80
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v22, 16, v4
+; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v20, 0xffff, v4
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v26, 16, v5
+; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v24, 0xffff, v5
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v30, 16, v6
+; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v28, 0xffff, v6
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v5, 16, v7
+; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v3, 0xffff, v7
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v14, 16, v1
+; GCN-NOHSA-VI-NEXT:    v_and_b32_e32 v12, 0xffff, v1
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[3:6], off, s[0:3], 0 offset:112
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v27, 0
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[28:31], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:32
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16
@@ -7001,55 +7000,55 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v9, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v10, 16, v2
-; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v6
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v0, v1, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v16, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v14, v6, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v9, v2, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v15, 16, v2
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v2, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, v8
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:80
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v3, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v5
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, v4
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v12, 16, v4
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v14, 16, v7
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v4
+; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v10, v8
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v25, v5, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v27, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v10, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v3, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v4, v6, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v6, v12, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v8, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v23, v14, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v21, v7, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v28, 31, v27
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v6
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v10, v10, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v12, v8, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v28, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v7
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v16, 16, v3
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v17, v4
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v14, v3, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v5
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v26, v6, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v4, v7, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v6, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v18, v5, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v15, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v16, v16, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v22, v17, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v3, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[25:28], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:96
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:48
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v29, 31, v28
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[26:29], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:64
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:32
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NOHSA-VI-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/X86/pr40730.ll b/llvm/test/CodeGen/X86/pr40730.ll
index 1d87a5773296a1d..146af802457b53b 100644
--- a/llvm/test/CodeGen/X86/pr40730.ll
+++ b/llvm/test/CodeGen/X86/pr40730.ll
@@ -17,20 +17,16 @@ define <8 x i32> @shuffle_v8i32_0dcd3f14(<8 x i32> %a, <8 x i32> %b) {
 }
 
 ; CHECK:      .LCPI1_0:
-; CHECK-NEXT: .quad   0x0000000e0000000d
-; CHECK-NEXT: .quad   0x0000000e0000000d
-; CHECK-NEXT: .quad   0x0000001000000000
-; CHECK-NEXT: .quad   0x0000000e0000000d
+; CHECK-NEXT: .zero   4
+; CHECK-NEXT: .zero   4
+; CHECK-NEXT: .zero   4
+; CHECK-NEXT: .zero   4
+; CHECK-NEXT: .long   13
+; CHECK-NEXT: .long   14
+; CHECK-NEXT: .zero   4
+; CHECK-NEXT: .long   16
 
 define <8 x i32> @shuffle_v8i32_0dcd3f14_constant(<8 x i32> %a0)  {
-; CHECK-LABEL: shuffle_v8i32_0dcd3f14_constant:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; CHECK-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
-; CHECK-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[3,1,1,0]
-; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],mem[1,2,3],ymm0[4],mem[5],ymm0[6,7]
-; CHECK-NEXT:    retq
   %res = shufflevector <8 x i32> %a0, <8 x i32> <i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>, <8 x i32> <i32 0, i32 13, i32 12, i32 13, i32 3, i32 15, i32 1, i32 4>
   ret <8 x i32> %res
 }
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index 30ee74b33575224..93bac93901bb14d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -398,19 +398,19 @@ define void @load_i16_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-SLOW-NEXT:    vpinsrw $2, %eax, %xmm4, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovd %xmm2, %eax
 ; AVX512F-SLOW-NEXT:    vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm2[0],xmm0[1,2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm4[2],xmm5[3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm2[0],xmm0[1,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[4,5,14,15,8,9,2,3,u,u,u,u,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm0[0,1],xmm2[2],xmm0[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm4[0],xmm6[1],xmm4[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm6[1],xmm5[2,3]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm5[0,1],xmm0[2],xmm5[3]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vmovq %xmm3, (%rsi)
 ; AVX512F-SLOW-NEXT:    vmovq %xmm1, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovq %xmm5, (%rcx)
+; AVX512F-SLOW-NEXT:    vmovq %xmm4, (%rcx)
 ; AVX512F-SLOW-NEXT:    vmovq %xmm6, (%r8)
 ; AVX512F-SLOW-NEXT:    vmovq %xmm0, (%r9)
 ; AVX512F-SLOW-NEXT:    retq
@@ -429,19 +429,19 @@ define void @load_i16_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vpinsrw $2, %eax, %xmm1, %xmm1
 ; AVX512F-FAST-NEXT:    vmovd %xmm2, %eax
 ; AVX512F-FAST-NEXT:    vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm2[0],xmm0[1,2,3]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm4[2],xmm5[3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm4 = xmm2[0],xmm0[1,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[4,5,14,15,8,9,2,3,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm0[0,1],xmm2[2],xmm0[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm4[0],xmm6[1],xmm4[2,3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm6[1],xmm5[2,3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm5[0,1],xmm0[2],xmm5[3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovq %xmm3, (%rsi)
 ; AVX512F-FAST-NEXT:    vmovq %xmm1, (%rdx)
-; AVX512F-FAST-NEXT:    vmovq %xmm5, (%rcx)
+; AVX512F-FAST-NEXT:    vmovq %xmm4, (%rcx)
 ; AVX512F-FAST-NEXT:    vmovq %xmm6, (%r8)
 ; AVX512F-FAST-NEXT:    vmovq %xmm0, (%r9)
 ; AVX512F-FAST-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
index e5a6eea44b8ad61..cf2dfaa3fdb49b5 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
@@ -3480,200 +3480,193 @@ define void @store_i16_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-FAST-LABEL: store_i16_stride8_vf32:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $472, %rsp # imm = 0x1D8
+; AVX512F-FAST-NEXT:    subq $360, %rsp # imm = 0x168
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512F-FAST-NEXT:    vmovdqa (%r10), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa (%rax), %xmm2
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm26
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm27
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%rax), %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm31
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm30
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm31
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm2
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm20
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm21
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm21
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm22
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm10
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm18
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm19
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r10), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %ymm1
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm24
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm25
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm4
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[8],ymm2[8],ymm4[9],ymm2[9],ymm4[10],ymm2[10],ymm4[11],ymm2[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm25
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm26
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm5
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm6
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm22
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm23
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm7
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm8
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm23
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm24
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r10), %xmm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm16
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm17
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %xmm3
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm4[4],ymm2[4],ymm4[5],ymm2[5],ymm4[6],ymm2[6],ymm4[7],ymm2[7],ymm4[12],ymm2[12],ymm4[13],ymm2[13],ymm4[14],ymm2[14],ymm4[15],ymm2[15]
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm17
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm18
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm4
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15]
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15]
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm2
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm8[4],ymm7[4],ymm8[5],ymm7[5],ymm8[6],ymm7[6],ymm8[7],ymm7[7],ymm8[12],ymm7[12],ymm8[13],ymm7[13],ymm8[14],ymm7[14],ymm8[15],ymm7[15]
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm16
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm5
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm11 = ymm8[4],ymm7[4],ymm8[5],ymm7[5],ymm8[6],ymm7[6],ymm8[7],ymm7[7],ymm8[12],ymm7[12],ymm8[13],ymm7[13],ymm8[14],ymm7[14],ymm8[15],ymm7[15]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm0
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX512F-FAST-NEXT:    vmovdqa (%r10), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm1
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm10 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm14 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm1
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm1
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm26
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm27
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm30
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm11, %zmm11
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm12, %zmm12, %zmm27
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm15, %zmm15
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm9, %zmm28
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm8, %zmm8, %zmm29
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm31, %xmm0
-; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm9 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm20, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm8
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm11
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm7, %zmm29
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm31, %xmm9
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm22, %xmm12
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm12
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
 ; AVX512F-FAST-NEXT:    movw $-30584, %ax # imm = 0x8888
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,1,4,5,4,5,5,7,10,9,14,13,14,13,15,15]
-; AVX512F-FAST-NEXT:    vpermd %zmm25, %zmm31, %zmm25
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,0,4,4,4,4,6,5,8,10,12,14,12,14,14,15]
-; AVX512F-FAST-NEXT:    vpermd %zmm24, %zmm0, %zmm25 {%k1}
-; AVX512F-FAST-NEXT:    vpermd %zmm17, %zmm31, %zmm17
-; AVX512F-FAST-NEXT:    vpermd %zmm16, %zmm0, %zmm17 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm13, %zmm13, %zmm16
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm13, %zmm13, %zmm24
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [0,1,4,5,4,5,5,7,10,9,14,13,14,13,15,15]
+; AVX512F-FAST-NEXT:    vpermd %zmm26, %zmm30, %zmm26
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [0,0,4,4,4,4,6,5,8,10,12,14,12,14,14,15]
+; AVX512F-FAST-NEXT:    vpermd %zmm25, %zmm31, %zmm26 {%k1}
+; AVX512F-FAST-NEXT:    vpermd %zmm18, %zmm30, %zmm18
+; AVX512F-FAST-NEXT:    vpermd %zmm17, %zmm31, %zmm18 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm17
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm25
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm10
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm13, %zmm13, %zmm13
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm14, %zmm14, %zmm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm12, %zmm12
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm14, %zmm14
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm10, %zmm10
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm7, %zmm7
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm4
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm8, %zmm8
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm7, %zmm7
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm9, %zmm9
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm8, %zmm8, %zmm8
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
-; AVX512F-FAST-NEXT:    vpermd %zmm6, %zmm31, %zmm6
-; AVX512F-FAST-NEXT:    vpermd %zmm12, %zmm0, %zmm6 {%k1}
-; AVX512F-FAST-NEXT:    vpermd %zmm7, %zmm31, %zmm7
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm12, %zmm12, %zmm12
+; AVX512F-FAST-NEXT:    vpermd %zmm5, %zmm30, %zmm5
+; AVX512F-FAST-NEXT:    vpermd %zmm14, %zmm31, %zmm5 {%k1}
+; AVX512F-FAST-NEXT:    vpermd %zmm6, %zmm30, %zmm6
 ; AVX512F-FAST-NEXT:    movw $8738, %ax # imm = 0x2222
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k2
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm0, %zmm7 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [4,5,1,3,5,7,5,7,14,13,11,11,15,15,15,15]
-; AVX512F-FAST-NEXT:    vpermd %zmm23, %zmm0, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [4,4,2,1,6,5,6,5,12,14,10,11,14,15,14,15]
-; AVX512F-FAST-NEXT:    vpermd %zmm22, %zmm12, %zmm10 {%k2}
-; AVX512F-FAST-NEXT:    vpermd %zmm27, %zmm0, %zmm22
-; AVX512F-FAST-NEXT:    vpermd %zmm26, %zmm12, %zmm22 {%k2}
-; AVX512F-FAST-NEXT:    vpermd %zmm3, %zmm0, %zmm3
-; AVX512F-FAST-NEXT:    vpermd %zmm5, %zmm12, %zmm3 {%k2}
-; AVX512F-FAST-NEXT:    vpermd %zmm2, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpermd %zmm4, %zmm12, %zmm0 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,u,0,u,u,u,1,u,10,10,10,10,u,u,11,11>
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,0,0,0,u,u,1,1,10,10,10,10,u,u,11,11>
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm4 {%k1} # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermd %zmm15, %zmm2, %zmm12
-; AVX512F-FAST-NEXT:    vpermd %zmm30, %zmm5, %zmm12 {%k1}
-; AVX512F-FAST-NEXT:    vpermd %zmm24, %zmm2, %zmm15
-; AVX512F-FAST-NEXT:    vpermd %zmm16, %zmm5, %zmm15 {%k1}
-; AVX512F-FAST-NEXT:    vpermd %zmm9, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vpermd %zmm1, %zmm5, %zmm2 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,u,1,u,1,u,u,u,10,u,11,u,11,u,u,u>
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,0,1,1,1,1,u,u,10,10,11,11,11,11,u,u>
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm1 {%k2} # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,u,1,u,1,u,u,u,10,10,11,11,11,11,u,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm29, %zmm9, %zmm16
-; AVX512F-FAST-NEXT:    vpermd %zmm28, %zmm5, %zmm16 {%k2}
-; AVX512F-FAST-NEXT:    vpermd %zmm14, %zmm9, %zmm14
-; AVX512F-FAST-NEXT:    vpermd %zmm13, %zmm5, %zmm14 {%k2}
-; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm9, %zmm9
-; AVX512F-FAST-NEXT:    vpermd %zmm8, %zmm5, %zmm9 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm8, %zmm31, %zmm6 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [4,5,1,3,5,7,5,7,14,13,11,11,15,15,15,15]
+; AVX512F-FAST-NEXT:    vpermd %zmm24, %zmm8, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [4,4,2,1,6,5,6,5,12,14,10,11,14,15,14,15]
+; AVX512F-FAST-NEXT:    vpermd %zmm23, %zmm24, %zmm14 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm8, %zmm11
+; AVX512F-FAST-NEXT:    vpermd %zmm16, %zmm24, %zmm11 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm2, %zmm8, %zmm2
+; AVX512F-FAST-NEXT:    vpermd %zmm4, %zmm24, %zmm2 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm1, %zmm8, %zmm1
+; AVX512F-FAST-NEXT:    vpermd %zmm3, %zmm24, %zmm1 {%k2}
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,u,0,u,u,u,1,u,10,10,10,10,u,u,11,11>
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,0,0,0,u,u,1,1,10,10,10,10,u,u,11,11>
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm4 {%k1} # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermd %zmm15, %zmm3, %zmm15
+; AVX512F-FAST-NEXT:    vpermd %zmm27, %zmm8, %zmm15 {%k1}
+; AVX512F-FAST-NEXT:    vpermd %zmm25, %zmm3, %zmm16
+; AVX512F-FAST-NEXT:    vpermd %zmm17, %zmm8, %zmm16 {%k1}
+; AVX512F-FAST-NEXT:    vpermd %zmm9, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vpermd %zmm7, %zmm8, %zmm3 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,u,1,u,1,u,u,u,10,10,11,11,11,11,u,u>
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm8 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,0,1,1,1,1,u,u,10,10,11,11,11,11,u,u>
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm8 {%k2} # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermd %zmm29, %zmm7, %zmm17
+; AVX512F-FAST-NEXT:    vpermd %zmm28, %zmm9, %zmm17 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm13, %zmm7, %zmm13
+; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm9, %zmm13 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm12, %zmm7, %zmm7
+; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm9, %zmm7 {%k2}
 ; AVX512F-FAST-NEXT:    movb $-86, %al
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm1 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, %zmm10 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, %zmm22 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, %zmm16 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, %zmm14 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm3 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, %zmm0 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, %zmm9 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, %zmm8 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, %zmm14 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, %zmm11 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, %zmm17 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, %zmm13 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, %zmm2 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, %zmm7 {%k1}
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 192(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 320(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, 256(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, 448(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, 384(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512F-FAST-NEXT:    addq $472, %rsp # imm = 0x1D8
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, 320(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, 256(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, 448(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 384(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rax)
+; AVX512F-FAST-NEXT:    addq $360, %rsp # imm = 0x168
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -7114,20 +7107,21 @@ define void @store_i16_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-FAST-LABEL: store_i16_stride8_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $2312, %rsp # imm = 0x908
+; AVX512F-FAST-NEXT:    subq $2376, %rsp # imm = 0x948
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-FAST-NEXT:    vmovdqa (%r10), %xmm12
+; AVX512F-FAST-NEXT:    vmovdqa (%r10), %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%r10), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rax), %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -7248,11 +7242,11 @@ define void @store_i16_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r10), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %ymm2
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm27
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm17
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm4
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm17
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm15
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm5
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm6
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11]
@@ -7265,74 +7259,75 @@ define void @store_i16_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm8[4],ymm7[4],ymm8[5],ymm7[5],ymm8[6],ymm7[6],ymm8[7],ymm7[7],ymm8[12],ymm7[12],ymm8[13],ymm7[13],ymm8[14],ymm7[14],ymm8[15],ymm7[15]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm9, %zmm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm13
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm9, %zmm13
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r10), %xmm3
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %xmm4
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm21
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm22
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm9
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm2
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm26
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm23
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm27
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm26
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm4
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm3, %zmm22
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm20
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm3, %zmm23
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm21
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r10), %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa (%r10), %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm3
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm6[0],ymm3[1],ymm6[1],ymm3[2],ymm6[2],ymm3[3],ymm6[3],ymm3[8],ymm6[8],ymm3[9],ymm6[9],ymm3[10],ymm6[10],ymm3[11],ymm6[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm7
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm6
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm5
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm10
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm10[0],ymm5[0],ymm10[1],ymm5[1],ymm10[2],ymm5[2],ymm10[3],ymm5[3],ymm10[8],ymm5[8],ymm10[9],ymm5[9],ymm10[10],ymm5[10],ymm10[11],ymm5[11]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm19
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm11
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm14
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm14[0],ymm11[0],ymm14[1],ymm11[1],ymm14[2],ymm11[2],ymm14[3],ymm11[3],ymm14[8],ymm11[8],ymm14[9],ymm11[9],ymm14[10],ymm11[10],ymm14[11],ymm11[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm16
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm12
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[8],ymm11[8],ymm12[9],ymm11[9],ymm12[10],ymm11[10],ymm12[11],ymm11[11]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm20
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm14
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm4
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm3[4],ymm6[4],ymm3[5],ymm6[5],ymm3[6],ymm6[6],ymm3[7],ymm6[7],ymm3[12],ymm6[12],ymm3[13],ymm6[13],ymm3[14],ymm6[14],ymm3[15],ymm6[15]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm6
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm10[4],ymm5[4],ymm10[5],ymm5[5],ymm10[6],ymm5[6],ymm10[7],ymm5[7],ymm10[12],ymm5[12],ymm10[13],ymm5[13],ymm10[14],ymm5[14],ymm10[15],ymm5[15]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm2
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm14[4],ymm11[4],ymm14[5],ymm11[5],ymm14[6],ymm11[6],ymm14[7],ymm11[7],ymm14[12],ymm11[12],ymm14[13],ymm11[13],ymm14[14],ymm11[14],ymm14[15],ymm11[15]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
-; AVX512F-FAST-NEXT:    vmovdqa (%rsp), %xmm5 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm0[0],ymm14[0],ymm0[1],ymm14[1],ymm0[2],ymm14[2],ymm0[3],ymm14[3],ymm0[8],ymm14[8],ymm0[9],ymm14[9],ymm0[10],ymm14[10],ymm0[11],ymm14[11]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm16
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm4
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm10[4],ymm5[4],ymm10[5],ymm5[5],ymm10[6],ymm5[6],ymm10[7],ymm5[7],ymm10[12],ymm5[12],ymm10[13],ymm5[13],ymm10[14],ymm5[14],ymm10[15],ymm5[15]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm2
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[12],ymm11[12],ymm12[13],ymm11[13],ymm12[14],ymm11[14],ymm12[15],ymm11[15]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm14[4],ymm0[5],ymm14[5],ymm0[6],ymm14[6],ymm0[7],ymm14[7],ymm0[12],ymm14[12],ymm0[13],ymm14[13],ymm0[14],ymm14[14],ymm0[15],ymm14[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklwd (%rsp), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # xmm5 = xmm5[0],mem[0],xmm5[1],mem[1],xmm5[2],mem[2],xmm5[3],mem[3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm10
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm11
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm8
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm14
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm3, %zmm14
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm12
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    movw $-30584, %ax # imm = 0x8888
@@ -7348,16 +7343,16 @@ define void @store_i16_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermd %zmm25, %zmm1, %zmm24 {%k2}
 ; AVX512F-FAST-NEXT:    vpermd %zmm18, %zmm0, %zmm25
 ; AVX512F-FAST-NEXT:    vpermd %zmm30, %zmm1, %zmm25 {%k2}
-; AVX512F-FAST-NEXT:    vpermd %zmm17, %zmm0, %zmm5
-; AVX512F-FAST-NEXT:    vpermd %zmm27, %zmm1, %zmm5 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpermd %zmm13, %zmm0, %zmm27
-; AVX512F-FAST-NEXT:    vpermd %zmm15, %zmm1, %zmm27 {%k2}
-; AVX512F-FAST-NEXT:    vpermd %zmm19, %zmm0, %zmm5
-; AVX512F-FAST-NEXT:    vpermd %zmm7, %zmm1, %zmm5 {%k2}
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpermd %zmm15, %zmm0, %zmm10
+; AVX512F-FAST-NEXT:    vpermd %zmm17, %zmm1, %zmm10 {%k2}
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpermd %zmm7, %zmm0, %zmm30
+; AVX512F-FAST-NEXT:    vpermd %zmm13, %zmm1, %zmm30 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm19, %zmm0, %zmm7
+; AVX512F-FAST-NEXT:    vpermd %zmm6, %zmm1, %zmm7 {%k2}
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpermd %zmm2, %zmm0, %zmm19
-; AVX512F-FAST-NEXT:    vpermd %zmm6, %zmm1, %zmm19 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm4, %zmm1, %zmm19 {%k2}
 ; AVX512F-FAST-NEXT:    movw $8738, %ax # imm = 0x2222
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [4,5,1,3,5,7,5,7,14,13,11,11,15,15,15,15]
@@ -7373,118 +7368,117 @@ define void @store_i16_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpermd %zmm28, %zmm0, %zmm17
 ; AVX512F-FAST-NEXT:    vpermd %zmm29, %zmm1, %zmm17 {%k1}
 ; AVX512F-FAST-NEXT:    vpermd %zmm9, %zmm0, %zmm9
-; AVX512F-FAST-NEXT:    vpermd %zmm21, %zmm1, %zmm9 {%k1}
-; AVX512F-FAST-NEXT:    vpermd %zmm4, %zmm0, %zmm4
-; AVX512F-FAST-NEXT:    vpermd %zmm16, %zmm1, %zmm4 {%k1}
+; AVX512F-FAST-NEXT:    vpermd %zmm22, %zmm1, %zmm9 {%k1}
+; AVX512F-FAST-NEXT:    vpermd %zmm16, %zmm0, %zmm4
+; AVX512F-FAST-NEXT:    vpermd %zmm20, %zmm1, %zmm4 {%k1}
 ; AVX512F-FAST-NEXT:    vpermd %zmm8, %zmm0, %zmm8
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm1, %zmm8 {%k1}
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm1, %zmm8 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,u,0,u,u,u,1,u,10,10,10,10,u,u,11,11>
 ; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,0,0,0,u,u,1,1,10,10,10,10,u,u,11,11>
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm1 {%k2} # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,0,0,0,u,u,1,1,10,10,10,10,u,u,11,11>
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm1 {%k2} # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm16 {%k2} # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm21 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm21 {%k2} # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm16 {%k2} # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm20 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm20 {%k2} # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm22 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm22 {%k2} # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm28 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm28 {%k2} # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm29 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm29 {%k2} # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermd %zmm23, %zmm0, %zmm23
-; AVX512F-FAST-NEXT:    vpermd %zmm26, %zmm10, %zmm23 {%k2}
-; AVX512F-FAST-NEXT:    vpermd %zmm20, %zmm0, %zmm20
-; AVX512F-FAST-NEXT:    vpermd %zmm22, %zmm10, %zmm20 {%k2}
+; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm28 {%k2} # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermd %zmm26, %zmm0, %zmm26
+; AVX512F-FAST-NEXT:    vpermd %zmm27, %zmm11, %zmm26 {%k2}
+; AVX512F-FAST-NEXT:    vpermd %zmm21, %zmm0, %zmm21
+; AVX512F-FAST-NEXT:    vpermd %zmm23, %zmm11, %zmm21 {%k2}
 ; AVX512F-FAST-NEXT:    vpermd %zmm12, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpermd %zmm14, %zmm10, %zmm0 {%k2}
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,u,1,u,1,u,u,u,10,u,11,u,11,u,u,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm11, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <0,0,1,1,1,1,u,u,10,10,11,11,11,11,u,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm11, %zmm12 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <0,u,1,u,1,u,u,u,10,10,11,11,11,11,u,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm14, %zmm22, %zmm14
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm11, %zmm14 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm26
-; AVX512F-FAST-NEXT:    vpermd %zmm26, %zmm22, %zmm26
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm11, %zmm26 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm30
-; AVX512F-FAST-NEXT:    vpermd %zmm30, %zmm22, %zmm30
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm11, %zmm30 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm31
-; AVX512F-FAST-NEXT:    vpermd %zmm31, %zmm22, %zmm31
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm11, %zmm31 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm10
+; AVX512F-FAST-NEXT:    vpermd %zmm14, %zmm11, %zmm0 {%k2}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm5
-; AVX512F-FAST-NEXT:    vpermd %zmm5, %zmm22, %zmm5
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm11, %zmm5 {%k1}
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm11
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm5, %zmm5
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,u,1,u,1,u,u,u,10,10,11,11,11,11,u,u>
+; AVX512F-FAST-NEXT:    vpermd %zmm5, %zmm12, %zmm5
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = <0,0,1,1,1,1,u,u,10,10,11,11,11,11,u,u>
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm14, %zmm5 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm10
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm23
+; AVX512F-FAST-NEXT:    vpermd %zmm23, %zmm12, %zmm23
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm14, %zmm23 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm27
+; AVX512F-FAST-NEXT:    vpermd %zmm27, %zmm12, %zmm27
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm14, %zmm27 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm29
+; AVX512F-FAST-NEXT:    vpermd %zmm29, %zmm12, %zmm29
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm14, %zmm29 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm31
+; AVX512F-FAST-NEXT:    vpermd %zmm31, %zmm12, %zmm31
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm14, %zmm31 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm10
+; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm12, %zmm10
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm14, %zmm10 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm18
-; AVX512F-FAST-NEXT:    vpermd %zmm18, %zmm22, %zmm18
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm11, %zmm18 {%k1}
+; AVX512F-FAST-NEXT:    vpermd %zmm18, %zmm12, %zmm18
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm14, %zmm18 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm10
-; AVX512F-FAST-NEXT:    vpermd %zmm10, %zmm22, %zmm10
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm11
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm12, %zmm11
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm22
-; AVX512F-FAST-NEXT:    vpermd %zmm22, %zmm11, %zmm10 {%k1}
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm12
+; AVX512F-FAST-NEXT:    vpermd %zmm12, %zmm14, %zmm11 {%k1}
 ; AVX512F-FAST-NEXT:    movb $-86, %al
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm12 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm5 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm6 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, %zmm7 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, %zmm14 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm26 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, %zmm23 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, %zmm27 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, %zmm13 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, %zmm15 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm28, %zmm30 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, %zmm31 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, %zmm29 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm28, %zmm31 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm27, %zmm9 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm23, %zmm5 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, %zmm18 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm30, %zmm9 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, %zmm10 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, %zmm18 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4 {%k1}
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, %zmm8 {%k1}
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k1}
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, %zmm11 {%k1}
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, (%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 192(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, 128(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, 320(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, 256(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, 256(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, 448(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, 384(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm31, 576(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm30, 512(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, 512(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, 704(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, 640(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, 832(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 768(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm27, 832(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm23, 768(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 960(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 896(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, 64(%rax)
-; AVX512F-FAST-NEXT:    addq $2312, %rsp # imm = 0x908
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rax)
+; AVX512F-FAST-NEXT:    addq $2376, %rsp # imm = 0x948
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
index f73081cfc404feb..7927a0e2aa549ce 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -4353,8 +4353,8 @@ define <16 x i16> @shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_1
 ;
 ; AVX2-SLOW-LABEL: shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_11:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,5,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[12,13,12,13,14,15,10,11,2,3,12,13,8,9,6,7,28,29,28,29,30,31,26,27,18,19,28,29,24,25,22,23]
 ; AVX2-SLOW-NEXT:    retq
 ;
@@ -4367,8 +4367,8 @@ define <16 x i16> @shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_1
 ;
 ; AVX2-FAST-PERLANE-LABEL: shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_11:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,5,2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[12,13,12,13,14,15,10,11,2,3,12,13,8,9,6,7,28,29,28,29,30,31,26,27,18,19,28,29,24,25,22,23]
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
@@ -4388,8 +4388,8 @@ define <16 x i16> @shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_1
 ;
 ; XOPAVX2-LABEL: shuffle_v16i16_06_06_07_05_01_06_04_11_14_14_15_13_09_14_12_11:
 ; XOPAVX2:       # %bb.0:
-; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,5,2,3,4,5,6,7]
-; XOPAVX2-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; XOPAVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
 ; XOPAVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[12,13,12,13,14,15,10,11,2,3,12,13,8,9,6,7,28,29,28,29,30,31,26,27,18,19,28,29,24,25,22,23]
 ; XOPAVX2-NEXT:    retq
   %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 6, i32 6, i32 7, i32 5, i32 1, i32 6, i32 4, i32 11, i32 14, i32 14, i32 15, i32 13, i32 9, i32 14, i32 12, i32 11>
@@ -5099,8 +5099,8 @@ define <16 x i16> @shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_1
 ;
 ; AVX2-SLOW-LABEL: shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_13:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,6,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5],ymm1[6],ymm0[7]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[6,7,14,15,2,3,0,1,4,5,14,15,6,7,10,11,22,23,30,31,18,19,16,17,20,21,30,31,22,23,26,27]
 ; AVX2-SLOW-NEXT:    retq
 ;
@@ -5113,8 +5113,8 @@ define <16 x i16> @shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_1
 ;
 ; AVX2-FAST-PERLANE-LABEL: shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_13:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,6,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5],ymm1[6],ymm0[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[6,7,14,15,2,3,0,1,4,5,14,15,6,7,10,11,22,23,30,31,18,19,16,17,20,21,30,31,22,23,26,27]
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
@@ -5134,8 +5134,8 @@ define <16 x i16> @shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_1
 ;
 ; XOPAVX2-LABEL: shuffle_v16i16_03_07_01_00_02_07_03_13_11_15_09_08_10_15_11_13:
 ; XOPAVX2:       # %bb.0:
-; XOPAVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,6,3,4,5,6,7]
-; XOPAVX2-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; XOPAVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5],ymm1[6],ymm0[7]
 ; XOPAVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[6,7,14,15,2,3,0,1,4,5,14,15,6,7,10,11,22,23,30,31,18,19,16,17,20,21,30,31,22,23,26,27]
 ; XOPAVX2-NEXT:    retq
   %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 3, i32 7, i32 1, i32 0, i32 2, i32 7, i32 3, i32 13, i32 11, i32 15, i32 9, i32 8, i32 10, i32 15, i32 11, i32 13>
@@ -6153,9 +6153,9 @@ define <16 x i16> @shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_1
 ;
 ; AVX2-LABEL: shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_12:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
 ; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
 ; AVX2-NEXT:    retq
 ;
@@ -6177,9 +6177,9 @@ define <16 x i16> @shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_1
 ;
 ; XOPAVX2-LABEL: shuffle_v16i16_21_22_23_00_01_02_03_12_29_30_31_08_09_10_11_12:
 ; XOPAVX2:       # %bb.0:
-; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
-; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7,8,9,10,11],ymm0[12],ymm1[13,14,15]
+; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
+; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
 ; XOPAVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
 ; XOPAVX2-NEXT:    retq
   %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 21, i32 22, i32 23, i32 0, i32 1, i32 2, i32 3, i32 12, i32 29, i32 30, i32 31, i32 8, i32 9, i32 10, i32 11, i32 12>
@@ -6335,9 +6335,9 @@ define <16 x i16> @shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_1
 ;
 ; AVX2-LABEL: shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_10:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6,7,8,9],ymm0[10],ymm1[11,12,13,14,15]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7,8,9],ymm1[10],ymm0[11,12,13,14,15]
 ; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
 ; AVX2-NEXT:    retq
 ;
@@ -6360,9 +6360,9 @@ define <16 x i16> @shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_1
 ;
 ; XOPAVX2-LABEL: shuffle_v16i16_19_20_21_22_23_00_01_10_27_28_29_30_31_08_09_10:
 ; XOPAVX2:       # %bb.0:
-; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6,7,8,9],ymm0[10],ymm1[11,12,13,14,15]
+; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
+; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7,8,9],ymm1[10],ymm0[11,12,13,14,15]
 ; XOPAVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
 ; XOPAVX2-NEXT:    retq
   %shuffle = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 1, i32 10, i32 27, i32 28, i32 29, i32 30, i32 31, i32 8, i32 9, i32 10>
@@ -6519,7 +6519,7 @@ define <16 x i16> @shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_2
 ; AVX2-LABEL: shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_26:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7,8,9],ymm1[10],ymm0[11,12,13,14,15]
 ; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
 ; AVX2-NEXT:    retq
@@ -6543,7 +6543,7 @@ define <16 x i16> @shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_2
 ; XOPAVX2-LABEL: shuffle_v16i16_03_04_05_06_07_16_17_26_11_12_13_14_15_24_25_26:
 ; XOPAVX2:       # %bb.0:
 ; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
 ; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6,7,8,9],ymm1[10],ymm0[11,12,13,14,15]
 ; XOPAVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
 ; XOPAVX2-NEXT:    retq
@@ -6597,7 +6597,7 @@ define <16 x i16> @shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_2
 ; AVX2-LABEL: shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_28:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
 ; AVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
 ; AVX2-NEXT:    retq
@@ -6622,7 +6622,7 @@ define <16 x i16> @shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_2
 ; XOPAVX2-LABEL: shuffle_v16i16_05_06_07_16_17_18_19_28_13_14_15_24_25_26_27_28:
 ; XOPAVX2:       # %bb.0:
 ; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; XOPAVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
 ; XOPAVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
 ; XOPAVX2-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
 ; XOPAVX2-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 8f9d2169aa19b95..6f6dfddb64f2d41 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -4765,14 +4765,14 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX512BW-SLOW-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6:
 ; AVX512BW-SLOW:       # %bb.0:
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,29,0,31,0,1,0,1]
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,0,15]
-; AVX512BW-SLOW-NEXT:    vpermd %zmm0, %zmm1, %zmm1
+; AVX512BW-SLOW-NEXT:    vpermi2d %zmm0, %zmm0, %zmm1
+; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX512BW-SLOW-NEXT:    vpbroadcastd %xmm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
@@ -4782,9 +4782,9 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX512BW-FAST-LABEL: vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,29,0,31,0,1,0,1]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,0,15]
-; AVX512BW-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm0, %zmm0, %zmm1
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero


