[llvm] [DAG] visitFREEZE - revisit frozen node after merging with unfrozen uses (PR #188206)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 4 08:55:23 PDT 2026


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/188206

From fc2da7d59b98af3b70101d2c9e173592b890d4dc Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Tue, 24 Mar 2026 09:54:00 +0000
Subject: [PATCH] [DAG] visitFREEZE - revisit frozen node after merging with
 unfrozen uses

After merging the other uses of N so that they all use FREEZE(N), make sure we revisit FREEZE(N) to attempt to push the FREEZE through the (now hasOneUse()) operand.
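
For illustration only (a hypothetical example, not one of the tests touched by
this patch), the situation being addressed looks roughly like this in IR:

  ; Hypothetical example, not taken from the patch.
  define i32 @freeze_multiuse(i32 %x, i32 %y) {
    %a = add i32 %x, %y   ; %a has both a frozen and an unfrozen use
    %f = freeze i32 %a
    %r = xor i32 %a, %f   ; unfrozen use of %a
    ret i32 %r
  }

visitFREEZE already rewrites the unfrozen use of %a to use the frozen value
instead, so afterwards the freeze is the only user of %a. At that point the
freeze could potentially be pushed through the now single-use add, but unless
the freeze node is put back on the worklist that fold may not be retried. The
extra AddToWorklist(N) below ensures the freeze is revisited.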
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |    2 +
 llvm/test/CodeGen/AMDGPU/fptoi.i128.ll        |   28 +-
 llvm/test/CodeGen/AMDGPU/load-constant-i1.ll  |  594 ++-
 llvm/test/CodeGen/AMDGPU/load-constant-i16.ll |  802 ++--
 llvm/test/CodeGen/AMDGPU/load-constant-i8.ll  | 1870 ++++----
 llvm/test/CodeGen/AMDGPU/load-global-i16.ll   | 1082 +++--
 llvm/test/CodeGen/AMDGPU/load-global-i8.ll    | 1636 +++----
 llvm/test/CodeGen/AMDGPU/load-local-i16.ll    |   30 +-
 llvm/test/CodeGen/AMDGPU/srem.ll              |  375 +-
 llvm/test/CodeGen/RISCV/abds.ll               |  248 +-
 llvm/test/CodeGen/RISCV/abdu-neg.ll           |  508 +--
 llvm/test/CodeGen/RISCV/fpclamptosat.ll       |  368 +-
 llvm/test/CodeGen/RISCV/iabs.ll               |   24 +-
 llvm/test/CodeGen/RISCV/idiv_large.ll         |  820 ++--
 llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll    | 1210 ++++--
 llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll        | 3830 ++++++++++-------
 llvm/test/CodeGen/RISCV/rvv/vp-splice.ll      |   78 +-
 llvm/test/CodeGen/X86/bmi-select-distrib.ll   |   36 +-
 .../CodeGen/X86/insertelement-var-index.ll    |   52 +-
 llvm/test/CodeGen/X86/midpoint-int-vec-256.ll |  702 +--
 llvm/test/CodeGen/X86/midpoint-int-vec-512.ll |  472 +-
 llvm/test/CodeGen/X86/vector-fshl-256.ll      |   32 +-
 llvm/test/CodeGen/X86/vector-fshl-512.ll      |   48 +-
 llvm/test/CodeGen/X86/vector-fshl-rot-256.ll  |    6 +-
 llvm/test/CodeGen/X86/vector-fshl-rot-512.ll  |   54 +-
 llvm/test/CodeGen/X86/vector-fshr-128.ll      |  166 +-
 llvm/test/CodeGen/X86/vector-fshr-256.ll      |  132 +-
 llvm/test/CodeGen/X86/vector-fshr-rot-128.ll  |   19 +-
 llvm/test/CodeGen/X86/vector-fshr-rot-256.ll  |    6 +-
 llvm/test/CodeGen/X86/vector-rotate-256.ll    |    6 +-
 llvm/test/CodeGen/X86/vector-rotate-512.ll    |   54 +-
 llvm/test/CodeGen/X86/vector-shift-lut.ll     |  100 +-
 llvm/test/CodeGen/X86/vector-shift-shl-256.ll |   76 +-
 llvm/test/CodeGen/X86/vector-shift-shl-512.ll |   38 +-
 34 files changed, 8547 insertions(+), 6957 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 383e45c5ea3a8..9e997089baa36 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -17573,6 +17573,8 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
     // creating a cycle in a DAG. Let's undo that by mutating the freeze.
     assert(N->getOperand(0) == FrozenN0 && "Expected cycle in DAG");
     DAG.UpdateNodeOperands(N, N0);
+    // Revisit the node.
+    AddToWorklist(N);
     return FrozenN0;
   }
 
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index dbe34d19cf3eb..cef03bb7d1ced 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -684,25 +684,15 @@ define i128 @fptoui_f32_to_i128(float %x) {
 }
 
 define i128 @fptosi_f16_to_i128(half %x) {
-; SDAG-LABEL: fptosi_f16_to_i128:
-; SDAG:       ; %bb.0:
-; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; SDAG-NEXT:    v_cvt_i32_f32_e32 v0, v0
-; SDAG-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; SDAG-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; SDAG-NEXT:    v_mov_b32_e32 v3, v2
-; SDAG-NEXT:    s_setpc_b64 s[30:31]
-;
-; GISEL-LABEL: fptosi_f16_to_i128:
-; GISEL:       ; %bb.0:
-; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; GISEL-NEXT:    v_cvt_i32_f32_e32 v0, v0
-; GISEL-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GISEL-NEXT:    v_mov_b32_e32 v2, v1
-; GISEL-NEXT:    v_mov_b32_e32 v3, v1
-; GISEL-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: fptosi_f16_to_i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT:    v_mov_b32_e32 v2, v1
+; GCN-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cvt = fptosi half %x to i128
   ret i128 %cvt
 }
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
index 4b9fdf3a768dc..fa0e4921343f3 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
@@ -10142,348 +10142,346 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
 ; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[2:3], 0x0
-; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s7, 0
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    s_mov_b32 s9, s7
+; GFX6-NEXT:    s_mov_b32 s11, s7
+; GFX6-NEXT:    s_mov_b32 s13, s7
+; GFX6-NEXT:    s_mov_b32 s17, s7
+; GFX6-NEXT:    s_mov_b32 s19, s7
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_ashr_i32 s6, s5, 31
-; GFX6-NEXT:    s_bfe_i64 s[66:67], s[4:5], 0x10000
+; GFX6-NEXT:    s_lshr_b32 s6, s5, 30
+; GFX6-NEXT:    s_lshr_b32 s8, s5, 28
+; GFX6-NEXT:    s_lshr_b32 s10, s5, 29
+; GFX6-NEXT:    s_lshr_b32 s12, s5, 26
+; GFX6-NEXT:    s_lshr_b32 s16, s5, 27
+; GFX6-NEXT:    s_mov_b32 s18, s5
+; GFX6-NEXT:    s_bfe_i64 s[14:15], s[4:5], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[44:45], s[18:19], 0x10000
+; GFX6-NEXT:    s_ashr_i32 s18, s5, 31
+; GFX6-NEXT:    s_bfe_i64 s[28:29], s[16:17], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[36:37], s[12:13], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[38:39], s[10:11], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[40:41], s[8:9], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[42:43], s[6:7], 0x10000
 ; GFX6-NEXT:    s_mov_b32 s2, -1
-; GFX6-NEXT:    s_mov_b32 s69, s7
-; GFX6-NEXT:    s_mov_b32 s27, s7
-; GFX6-NEXT:    s_mov_b32 s37, s7
 ; GFX6-NEXT:    s_mov_b32 s31, s7
-; GFX6-NEXT:    s_mov_b32 s29, s7
-; GFX6-NEXT:    s_mov_b32 s23, s7
 ; GFX6-NEXT:    s_mov_b32 s35, s7
 ; GFX6-NEXT:    s_mov_b32 s25, s7
-; GFX6-NEXT:    s_mov_b32 s19, s7
+; GFX6-NEXT:    s_mov_b32 s27, s7
 ; GFX6-NEXT:    s_mov_b32 s21, s7
-; GFX6-NEXT:    s_mov_b32 s9, s7
-; GFX6-NEXT:    s_mov_b32 s11, s7
-; GFX6-NEXT:    s_mov_b32 s13, s7
-; GFX6-NEXT:    s_mov_b32 s15, s7
-; GFX6-NEXT:    s_mov_b32 s17, s7
-; GFX6-NEXT:    s_mov_b32 s39, s7
-; GFX6-NEXT:    s_mov_b32 s41, s7
-; GFX6-NEXT:    s_mov_b32 s43, s7
+; GFX6-NEXT:    s_mov_b32 s23, s7
+; GFX6-NEXT:    v_mov_b32_e32 v4, s18
+; GFX6-NEXT:    v_mov_b32_e32 v0, s44
+; GFX6-NEXT:    v_mov_b32_e32 v1, s45
 ; GFX6-NEXT:    s_mov_b32 s45, s7
+; GFX6-NEXT:    v_mov_b32_e32 v6, s14
+; GFX6-NEXT:    v_mov_b32_e32 v7, s15
 ; GFX6-NEXT:    s_mov_b32 s47, s7
-; GFX6-NEXT:    s_mov_b32 s49, s7
-; GFX6-NEXT:    s_mov_b32 s51, s7
-; GFX6-NEXT:    s_mov_b32 s53, s7
-; GFX6-NEXT:    s_mov_b32 s55, s7
-; GFX6-NEXT:    s_mov_b32 s57, s7
-; GFX6-NEXT:    s_mov_b32 s59, s7
-; GFX6-NEXT:    s_mov_b32 s61, s7
-; GFX6-NEXT:    s_mov_b32 s63, s7
-; GFX6-NEXT:    v_mov_b32_e32 v6, s6
-; GFX6-NEXT:    s_mov_b32 s65, s7
-; GFX6-NEXT:    v_mov_b32_e32 v2, s66
-; GFX6-NEXT:    v_mov_b32_e32 v3, s67
-; GFX6-NEXT:    s_mov_b32 s67, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s5, 30
-; GFX6-NEXT:    s_mov_b32 s68, s5
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[68:69], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    v_mov_b32_e32 v0, s68
-; GFX6-NEXT:    v_mov_b32_e32 v1, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 30
+; GFX6-NEXT:    v_mov_b32_e32 v2, s42
+; GFX6-NEXT:    v_mov_b32_e32 v3, s43
+; GFX6-NEXT:    s_mov_b32 s43, s7
+; GFX6-NEXT:    v_mov_b32_e32 v8, s40
+; GFX6-NEXT:    v_mov_b32_e32 v9, s41
+; GFX6-NEXT:    s_mov_b32 s41, s7
+; GFX6-NEXT:    v_mov_b32_e32 v10, s38
+; GFX6-NEXT:    v_mov_b32_e32 v11, s39
+; GFX6-NEXT:    s_mov_b32 s39, s7
+; GFX6-NEXT:    v_mov_b32_e32 v12, s36
+; GFX6-NEXT:    v_mov_b32_e32 v13, s37
+; GFX6-NEXT:    s_mov_b32 s15, s7
+; GFX6-NEXT:    v_mov_b32_e32 v14, s28
+; GFX6-NEXT:    v_mov_b32_e32 v15, s29
+; GFX6-NEXT:    s_mov_b32 s37, s7
+; GFX6-NEXT:    s_lshr_b32 s30, s5, 24
+; GFX6-NEXT:    s_lshr_b32 s34, s5, 25
+; GFX6-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[28:29], s[30:31], 0x10000
+; GFX6-NEXT:    v_mov_b32_e32 v5, s18
+; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:496
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v2, s28
+; GFX6-NEXT:    v_mov_b32_e32 v3, s29
+; GFX6-NEXT:    s_mov_b32 s29, s7
+; GFX6-NEXT:    v_mov_b32_e32 v4, s34
+; GFX6-NEXT:    v_mov_b32_e32 v5, s35
+; GFX6-NEXT:    s_lshr_b32 s24, s5, 22
+; GFX6-NEXT:    s_lshr_b32 s26, s5, 23
+; GFX6-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:480
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v8, s24
+; GFX6-NEXT:    v_mov_b32_e32 v9, s25
+; GFX6-NEXT:    s_mov_b32 s25, s7
+; GFX6-NEXT:    v_mov_b32_e32 v10, s26
+; GFX6-NEXT:    v_mov_b32_e32 v11, s27
+; GFX6-NEXT:    s_mov_b32 s27, s7
+; GFX6-NEXT:    s_lshr_b32 s20, s5, 20
+; GFX6-NEXT:    s_lshr_b32 s22, s5, 21
+; GFX6-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:464
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v12, s20
+; GFX6-NEXT:    v_mov_b32_e32 v13, s21
+; GFX6-NEXT:    s_mov_b32 s35, s7
+; GFX6-NEXT:    v_mov_b32_e32 v14, s22
+; GFX6-NEXT:    v_mov_b32_e32 v15, s23
+; GFX6-NEXT:    s_mov_b32 s21, s7
+; GFX6-NEXT:    s_mov_b32 s23, s7
+; GFX6-NEXT:    s_lshr_b32 s16, s5, 18
+; GFX6-NEXT:    s_lshr_b32 s18, s5, 19
+; GFX6-NEXT:    s_lshr_b32 s10, s5, 16
+; GFX6-NEXT:    s_lshr_b32 s12, s5, 17
+; GFX6-NEXT:    s_lshr_b32 s8, s5, 14
+; GFX6-NEXT:    s_lshr_b32 s44, s5, 15
+; GFX6-NEXT:    s_lshr_b32 s46, s5, 12
+; GFX6-NEXT:    s_lshr_b32 s42, s5, 13
+; GFX6-NEXT:    s_lshr_b32 s40, s5, 10
+; GFX6-NEXT:    s_lshr_b32 s38, s5, 11
+; GFX6-NEXT:    s_lshr_b32 s14, s5, 8
+; GFX6-NEXT:    s_lshr_b32 s36, s5, 9
+; GFX6-NEXT:    s_lshr_b32 s28, s5, 6
+; GFX6-NEXT:    s_lshr_b32 s30, s5, 7
+; GFX6-NEXT:    s_lshr_b32 s24, s5, 4
+; GFX6-NEXT:    s_lshr_b32 s26, s5, 5
+; GFX6-NEXT:    s_lshr_b32 s34, s5, 2
+; GFX6-NEXT:    s_lshr_b32 s20, s5, 3
+; GFX6-NEXT:    s_lshr_b32 s22, s5, 1
+; GFX6-NEXT:    s_bfe_i64 s[6:7], s[18:19], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:448
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v2, s16
+; GFX6-NEXT:    v_mov_b32_e32 v3, s17
+; GFX6-NEXT:    s_lshr_b32 s16, s4, 30
 ; GFX6-NEXT:    v_mov_b32_e32 v4, s6
 ; GFX6-NEXT:    v_mov_b32_e32 v5, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 31
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    v_mov_b32_e32 v8, s68
-; GFX6-NEXT:    v_mov_b32_e32 v9, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 28
+; GFX6-NEXT:    s_lshr_b32 s18, s4, 31
+; GFX6-NEXT:    s_bfe_i64 s[6:7], s[12:13], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:432
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v8, s10
+; GFX6-NEXT:    v_mov_b32_e32 v9, s11
+; GFX6-NEXT:    s_lshr_b32 s10, s4, 28
 ; GFX6-NEXT:    v_mov_b32_e32 v10, s6
 ; GFX6-NEXT:    v_mov_b32_e32 v11, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 29
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    v_mov_b32_e32 v12, s68
-; GFX6-NEXT:    v_mov_b32_e32 v13, s69
+; GFX6-NEXT:    s_lshr_b32 s12, s4, 29
+; GFX6-NEXT:    s_bfe_i64 s[6:7], s[44:45], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:416
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v12, s8
+; GFX6-NEXT:    v_mov_b32_e32 v13, s9
 ; GFX6-NEXT:    s_lshr_b32 s8, s4, 26
 ; GFX6-NEXT:    v_mov_b32_e32 v14, s6
 ; GFX6-NEXT:    v_mov_b32_e32 v15, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 27
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    v_mov_b32_e32 v16, s68
-; GFX6-NEXT:    v_mov_b32_e32 v17, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 24
-; GFX6-NEXT:    v_mov_b32_e32 v18, s6
-; GFX6-NEXT:    v_mov_b32_e32 v19, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 25
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    v_mov_b32_e32 v20, s68
-; GFX6-NEXT:    v_mov_b32_e32 v21, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 22
-; GFX6-NEXT:    v_mov_b32_e32 v22, s6
-; GFX6-NEXT:    v_mov_b32_e32 v23, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 23
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    v_mov_b32_e32 v24, s68
-; GFX6-NEXT:    v_mov_b32_e32 v25, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 20
-; GFX6-NEXT:    v_mov_b32_e32 v26, s6
-; GFX6-NEXT:    v_mov_b32_e32 v27, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 21
-; GFX6-NEXT:    v_mov_b32_e32 v7, v6
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:496
-; GFX6-NEXT:    v_mov_b32_e32 v28, s68
-; GFX6-NEXT:    v_mov_b32_e32 v29, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 18
-; GFX6-NEXT:    v_mov_b32_e32 v30, s6
-; GFX6-NEXT:    v_mov_b32_e32 v31, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 19
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:240
-; GFX6-NEXT:    v_mov_b32_e32 v32, s68
-; GFX6-NEXT:    v_mov_b32_e32 v33, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 16
-; GFX6-NEXT:    v_mov_b32_e32 v34, s6
-; GFX6-NEXT:    v_mov_b32_e32 v35, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 17
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:224
-; GFX6-NEXT:    v_mov_b32_e32 v36, s68
-; GFX6-NEXT:    v_mov_b32_e32 v37, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 14
-; GFX6-NEXT:    v_mov_b32_e32 v38, s6
-; GFX6-NEXT:    v_mov_b32_e32 v39, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 15
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:208
-; GFX6-NEXT:    s_waitcnt expcnt(3)
-; GFX6-NEXT:    v_mov_b32_e32 v4, s68
-; GFX6-NEXT:    v_mov_b32_e32 v5, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 12
-; GFX6-NEXT:    v_mov_b32_e32 v6, s6
-; GFX6-NEXT:    v_mov_b32_e32 v7, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 13
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:192
-; GFX6-NEXT:    s_waitcnt expcnt(3)
-; GFX6-NEXT:    v_mov_b32_e32 v8, s68
-; GFX6-NEXT:    v_mov_b32_e32 v9, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 10
+; GFX6-NEXT:    s_lshr_b32 s44, s4, 27
+; GFX6-NEXT:    s_bfe_i64 s[6:7], s[42:43], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[42:43], s[46:47], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:400
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v2, s42
+; GFX6-NEXT:    v_mov_b32_e32 v3, s43
+; GFX6-NEXT:    s_lshr_b32 s42, s4, 24
+; GFX6-NEXT:    v_mov_b32_e32 v4, s6
+; GFX6-NEXT:    v_mov_b32_e32 v5, s7
+; GFX6-NEXT:    s_lshr_b32 s46, s4, 25
+; GFX6-NEXT:    s_bfe_i64 s[6:7], s[38:39], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[38:39], s[40:41], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:384
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v8, s38
+; GFX6-NEXT:    v_mov_b32_e32 v9, s39
+; GFX6-NEXT:    s_lshr_b32 s38, s4, 22
 ; GFX6-NEXT:    v_mov_b32_e32 v10, s6
 ; GFX6-NEXT:    v_mov_b32_e32 v11, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 11
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:176
-; GFX6-NEXT:    s_waitcnt expcnt(3)
-; GFX6-NEXT:    v_mov_b32_e32 v12, s68
-; GFX6-NEXT:    v_mov_b32_e32 v13, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 8
+; GFX6-NEXT:    s_lshr_b32 s40, s4, 23
+; GFX6-NEXT:    s_bfe_i64 s[6:7], s[36:37], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:368
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v12, s14
+; GFX6-NEXT:    v_mov_b32_e32 v13, s15
+; GFX6-NEXT:    s_lshr_b32 s14, s4, 20
 ; GFX6-NEXT:    v_mov_b32_e32 v14, s6
 ; GFX6-NEXT:    v_mov_b32_e32 v15, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 9
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[28:31], off, s[0:3], 0 offset:160
-; GFX6-NEXT:    s_waitcnt expcnt(3)
-; GFX6-NEXT:    v_mov_b32_e32 v16, s68
-; GFX6-NEXT:    v_mov_b32_e32 v17, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 6
-; GFX6-NEXT:    v_mov_b32_e32 v18, s6
-; GFX6-NEXT:    v_mov_b32_e32 v19, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 7
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[32:35], off, s[0:3], 0 offset:144
-; GFX6-NEXT:    s_waitcnt expcnt(3)
-; GFX6-NEXT:    v_mov_b32_e32 v20, s68
-; GFX6-NEXT:    v_mov_b32_e32 v21, s69
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 4
-; GFX6-NEXT:    v_mov_b32_e32 v22, s6
-; GFX6-NEXT:    v_mov_b32_e32 v23, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 5
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[8:9], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[36:39], off, s[0:3], 0 offset:128
-; GFX6-NEXT:    s_waitcnt expcnt(3)
-; GFX6-NEXT:    v_mov_b32_e32 v24, s68
-; GFX6-NEXT:    v_mov_b32_e32 v25, s69
-; GFX6-NEXT:    s_lshr_b32 s68, s4, 2
-; GFX6-NEXT:    v_mov_b32_e32 v26, s6
-; GFX6-NEXT:    v_mov_b32_e32 v27, s7
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 3
-; GFX6-NEXT:    s_lshr_b32 s26, s5, 29
-; GFX6-NEXT:    s_lshr_b32 s36, s5, 28
-; GFX6-NEXT:    s_lshr_b32 s30, s5, 26
-; GFX6-NEXT:    s_lshr_b32 s28, s5, 27
-; GFX6-NEXT:    s_lshr_b32 s22, s5, 25
-; GFX6-NEXT:    s_lshr_b32 s34, s5, 24
-; GFX6-NEXT:    s_lshr_b32 s24, s5, 22
-; GFX6-NEXT:    s_lshr_b32 s18, s5, 23
-; GFX6-NEXT:    s_lshr_b32 s20, s5, 20
-; GFX6-NEXT:    s_lshr_b32 s8, s5, 21
-; GFX6-NEXT:    s_lshr_b32 s10, s5, 18
-; GFX6-NEXT:    s_lshr_b32 s12, s5, 19
-; GFX6-NEXT:    s_lshr_b32 s14, s5, 17
-; GFX6-NEXT:    s_lshr_b32 s16, s5, 16
-; GFX6-NEXT:    s_lshr_b32 s38, s5, 14
-; GFX6-NEXT:    s_lshr_b32 s40, s5, 15
-; GFX6-NEXT:    s_lshr_b32 s42, s5, 12
-; GFX6-NEXT:    s_lshr_b32 s44, s5, 13
-; GFX6-NEXT:    s_lshr_b32 s46, s5, 10
-; GFX6-NEXT:    s_lshr_b32 s48, s5, 11
-; GFX6-NEXT:    s_lshr_b32 s50, s5, 8
-; GFX6-NEXT:    s_lshr_b32 s52, s5, 9
-; GFX6-NEXT:    s_lshr_b32 s54, s5, 6
-; GFX6-NEXT:    s_lshr_b32 s56, s5, 7
-; GFX6-NEXT:    s_lshr_b32 s58, s5, 4
-; GFX6-NEXT:    s_lshr_b32 s60, s5, 5
-; GFX6-NEXT:    s_lshr_b32 s62, s5, 2
-; GFX6-NEXT:    s_lshr_b32 s64, s5, 3
-; GFX6-NEXT:    s_lshr_b32 s66, s5, 1
-; GFX6-NEXT:    s_lshr_b32 s4, s4, 1
-; GFX6-NEXT:    s_bfe_i64 s[70:71], s[6:7], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[68:69], s[68:69], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[72:73], s[4:5], 0x10000
+; GFX6-NEXT:    s_lshr_b32 s6, s4, 21
 ; GFX6-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x10000
 ; GFX6-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:352
+; GFX6-NEXT:    v_mov_b32_e32 v16, s28
+; GFX6-NEXT:    v_mov_b32_e32 v17, s29
+; GFX6-NEXT:    s_lshr_b32 s28, s4, 18
+; GFX6-NEXT:    v_mov_b32_e32 v18, s30
+; GFX6-NEXT:    v_mov_b32_e32 v19, s31
+; GFX6-NEXT:    s_lshr_b32 s30, s4, 19
+; GFX6-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x10000
 ; GFX6-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:336
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v8, s24
+; GFX6-NEXT:    v_mov_b32_e32 v9, s25
+; GFX6-NEXT:    s_lshr_b32 s24, s4, 16
+; GFX6-NEXT:    v_mov_b32_e32 v10, s26
+; GFX6-NEXT:    v_mov_b32_e32 v11, s27
+; GFX6-NEXT:    s_lshr_b32 s26, s4, 17
+; GFX6-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:320
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v12, s34
+; GFX6-NEXT:    v_mov_b32_e32 v13, s35
+; GFX6-NEXT:    s_lshr_b32 s34, s4, 14
 ; GFX6-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[74:75], s[18:19], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[4:5], s[66:67], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[6:7], s[64:65], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[18:19], s[62:63], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[60:61], s[60:61], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[58:59], s[58:59], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[56:57], s[56:57], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[54:55], s[54:55], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[52:53], s[52:53], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[50:51], s[50:51], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[46:47], s[46:47], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x10000
+; GFX6-NEXT:    v_mov_b32_e32 v14, s20
+; GFX6-NEXT:    v_mov_b32_e32 v15, s21
+; GFX6-NEXT:    s_lshr_b32 s20, s4, 15
+; GFX6-NEXT:    v_mov_b32_e32 v2, s22
+; GFX6-NEXT:    v_mov_b32_e32 v3, s23
+; GFX6-NEXT:    s_lshr_b32 s22, s4, 12
+; GFX6-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:304
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v16, s16
+; GFX6-NEXT:    v_mov_b32_e32 v17, s17
+; GFX6-NEXT:    s_lshr_b32 s16, s4, 13
+; GFX6-NEXT:    v_mov_b32_e32 v18, s18
+; GFX6-NEXT:    v_mov_b32_e32 v19, s19
+; GFX6-NEXT:    s_lshr_b32 s18, s4, 10
+; GFX6-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:288
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v8, s10
+; GFX6-NEXT:    v_mov_b32_e32 v9, s11
+; GFX6-NEXT:    s_lshr_b32 s10, s4, 11
+; GFX6-NEXT:    v_mov_b32_e32 v10, s12
+; GFX6-NEXT:    v_mov_b32_e32 v11, s13
+; GFX6-NEXT:    s_lshr_b32 s12, s4, 8
+; GFX6-NEXT:    s_bfe_i64 s[36:37], s[44:45], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:272
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v12, s8
+; GFX6-NEXT:    v_mov_b32_e32 v13, s9
+; GFX6-NEXT:    s_lshr_b32 s8, s4, 9
+; GFX6-NEXT:    v_mov_b32_e32 v14, s36
+; GFX6-NEXT:    v_mov_b32_e32 v15, s37
+; GFX6-NEXT:    s_lshr_b32 s36, s4, 6
+; GFX6-NEXT:    s_bfe_i64 s[44:45], s[46:47], 0x10000
 ; GFX6-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:256
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s42
+; GFX6-NEXT:    v_mov_b32_e32 v1, s43
+; GFX6-NEXT:    s_lshr_b32 s42, s4, 7
+; GFX6-NEXT:    v_mov_b32_e32 v2, s44
+; GFX6-NEXT:    v_mov_b32_e32 v3, s45
+; GFX6-NEXT:    s_lshr_b32 s44, s4, 4
 ; GFX6-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x10000
 ; GFX6-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:240
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v16, s38
+; GFX6-NEXT:    v_mov_b32_e32 v17, s39
+; GFX6-NEXT:    s_lshr_b32 s38, s4, 5
+; GFX6-NEXT:    v_mov_b32_e32 v18, s40
+; GFX6-NEXT:    v_mov_b32_e32 v19, s41
+; GFX6-NEXT:    s_lshr_b32 s40, s4, 2
 ; GFX6-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:224
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v8, s14
+; GFX6-NEXT:    v_mov_b32_e32 v9, s15
+; GFX6-NEXT:    s_lshr_b32 s14, s4, 3
+; GFX6-NEXT:    s_lshr_b32 s4, s4, 1
+; GFX6-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x10000
 ; GFX6-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x10000
 ; GFX6-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x10000
-; GFX6-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x10000
-; GFX6-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:112
-; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:96
-; GFX6-NEXT:    s_waitcnt expcnt(1)
-; GFX6-NEXT:    v_mov_b32_e32 v6, s68
-; GFX6-NEXT:    v_mov_b32_e32 v7, s69
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v8, s70
-; GFX6-NEXT:    v_mov_b32_e32 v9, s71
-; GFX6-NEXT:    v_mov_b32_e32 v10, s36
-; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80
-; GFX6-NEXT:    v_mov_b32_e32 v4, s72
-; GFX6-NEXT:    v_mov_b32_e32 v5, s73
-; GFX6-NEXT:    v_mov_b32_e32 v11, s37
-; GFX6-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:64
-; GFX6-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:48
-; GFX6-NEXT:    s_waitcnt expcnt(2)
-; GFX6-NEXT:    v_mov_b32_e32 v14, s30
-; GFX6-NEXT:    v_mov_b32_e32 v12, s26
-; GFX6-NEXT:    v_mov_b32_e32 v13, s27
-; GFX6-NEXT:    v_mov_b32_e32 v15, s31
-; GFX6-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:32
+; GFX6-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x10000
+; GFX6-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
+; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:208
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192
+; GFX6-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:176
+; GFX6-NEXT:    v_mov_b32_e32 v10, s6
+; GFX6-NEXT:    v_mov_b32_e32 v11, s7
+; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160
 ; GFX6-NEXT:    s_waitcnt expcnt(2)
-; GFX6-NEXT:    v_mov_b32_e32 v18, s34
-; GFX6-NEXT:    v_mov_b32_e32 v16, s28
-; GFX6-NEXT:    v_mov_b32_e32 v17, s29
-; GFX6-NEXT:    v_mov_b32_e32 v19, s35
-; GFX6-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:16
+; GFX6-NEXT:    v_mov_b32_e32 v0, s28
+; GFX6-NEXT:    v_mov_b32_e32 v1, s29
+; GFX6-NEXT:    v_mov_b32_e32 v2, s30
+; GFX6-NEXT:    v_mov_b32_e32 v3, s31
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v6, s24
-; GFX6-NEXT:    v_mov_b32_e32 v20, s22
-; GFX6-NEXT:    v_mov_b32_e32 v21, s23
-; GFX6-NEXT:    v_mov_b32_e32 v7, s25
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0
+; GFX6-NEXT:    v_mov_b32_e32 v0, s24
+; GFX6-NEXT:    v_mov_b32_e32 v1, s25
+; GFX6-NEXT:    v_mov_b32_e32 v2, s26
+; GFX6-NEXT:    v_mov_b32_e32 v3, s27
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s34
+; GFX6-NEXT:    v_mov_b32_e32 v1, s35
 ; GFX6-NEXT:    v_mov_b32_e32 v2, s20
-; GFX6-NEXT:    v_mov_b32_e32 v8, s74
-; GFX6-NEXT:    v_mov_b32_e32 v9, s75
 ; GFX6-NEXT:    v_mov_b32_e32 v3, s21
-; GFX6-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:480
-; GFX6-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:464
-; GFX6-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:448
-; GFX6-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:432
-; GFX6-NEXT:    v_mov_b32_e32 v4, s8
-; GFX6-NEXT:    v_mov_b32_e32 v5, s9
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:416
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v2, s10
-; GFX6-NEXT:    v_mov_b32_e32 v3, s11
-; GFX6-NEXT:    v_mov_b32_e32 v4, s12
-; GFX6-NEXT:    v_mov_b32_e32 v5, s13
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:400
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s22
+; GFX6-NEXT:    v_mov_b32_e32 v1, s23
 ; GFX6-NEXT:    v_mov_b32_e32 v2, s16
 ; GFX6-NEXT:    v_mov_b32_e32 v3, s17
-; GFX6-NEXT:    v_mov_b32_e32 v4, s14
-; GFX6-NEXT:    v_mov_b32_e32 v5, s15
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:384
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v2, s38
-; GFX6-NEXT:    v_mov_b32_e32 v3, s39
-; GFX6-NEXT:    v_mov_b32_e32 v4, s40
-; GFX6-NEXT:    v_mov_b32_e32 v5, s41
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:368
+; GFX6-NEXT:    v_mov_b32_e32 v0, s18
+; GFX6-NEXT:    v_mov_b32_e32 v1, s19
+; GFX6-NEXT:    v_mov_b32_e32 v2, s10
+; GFX6-NEXT:    v_mov_b32_e32 v3, s11
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mov_b32_e32 v1, s13
+; GFX6-NEXT:    v_mov_b32_e32 v2, s8
+; GFX6-NEXT:    v_mov_b32_e32 v3, s9
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
+; GFX6-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s36
+; GFX6-NEXT:    v_mov_b32_e32 v1, s37
 ; GFX6-NEXT:    v_mov_b32_e32 v2, s42
 ; GFX6-NEXT:    v_mov_b32_e32 v3, s43
-; GFX6-NEXT:    v_mov_b32_e32 v4, s44
-; GFX6-NEXT:    v_mov_b32_e32 v5, s45
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:352
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v2, s46
-; GFX6-NEXT:    v_mov_b32_e32 v3, s47
-; GFX6-NEXT:    v_mov_b32_e32 v4, s48
-; GFX6-NEXT:    v_mov_b32_e32 v5, s49
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:336
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v2, s50
-; GFX6-NEXT:    v_mov_b32_e32 v3, s51
-; GFX6-NEXT:    v_mov_b32_e32 v4, s52
-; GFX6-NEXT:    v_mov_b32_e32 v5, s53
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:320
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v2, s54
-; GFX6-NEXT:    v_mov_b32_e32 v3, s55
-; GFX6-NEXT:    v_mov_b32_e32 v4, s56
-; GFX6-NEXT:    v_mov_b32_e32 v5, s57
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:304
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v2, s58
-; GFX6-NEXT:    v_mov_b32_e32 v3, s59
-; GFX6-NEXT:    v_mov_b32_e32 v4, s60
-; GFX6-NEXT:    v_mov_b32_e32 v5, s61
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:288
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v2, s18
-; GFX6-NEXT:    v_mov_b32_e32 v3, s19
-; GFX6-NEXT:    v_mov_b32_e32 v4, s6
-; GFX6-NEXT:    v_mov_b32_e32 v5, s7
-; GFX6-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:272
+; GFX6-NEXT:    v_mov_b32_e32 v0, s44
+; GFX6-NEXT:    v_mov_b32_e32 v1, s45
+; GFX6-NEXT:    v_mov_b32_e32 v2, s38
+; GFX6-NEXT:    v_mov_b32_e32 v3, s39
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v2, s4
-; GFX6-NEXT:    v_mov_b32_e32 v3, s5
-; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:256
+; GFX6-NEXT:    v_mov_b32_e32 v0, s40
+; GFX6-NEXT:    v_mov_b32_e32 v1, s41
+; GFX6-NEXT:    v_mov_b32_e32 v2, s14
+; GFX6-NEXT:    v_mov_b32_e32 v3, s15
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GFX6-NEXT:    v_mov_b32_e32 v8, s4
+; GFX6-NEXT:    v_mov_b32_e32 v9, s5
+; GFX6-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: constant_sextload_v64i1_to_v64i64:
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
index 84804eeaa9a75..9be06f3b6422c 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -6549,8 +6549,8 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s11, s9
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s7
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s6, 16
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s10, s5
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s6, 16
 ; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s4, 16
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[4:5], 0x100000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[6:7], 0x100000
@@ -6568,14 +6568,14 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s20
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s15
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s13
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s18
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s19
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s15
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s13
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s16
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s17
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s10
@@ -6598,17 +6598,17 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    s_mov_b32 s9, s3
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-HSA-NEXT:    s_mov_b32 s2, s7
-; GCN-HSA-NEXT:    s_lshr_b32 s10, s6, 16
 ; GCN-HSA-NEXT:    s_mov_b32 s8, s5
+; GCN-HSA-NEXT:    s_lshr_b32 s10, s6, 16
 ; GCN-HSA-NEXT:    s_lshr_b32 s12, s4, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s11, s5, 16
 ; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[4:5], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[6:7], 0x100000
-; GCN-HSA-NEXT:    s_ashr_i32 s6, s5, 31
 ; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s6, s5, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s18, s5, 16
 ; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[8:9], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[10:11], 0x100000
-; GCN-HSA-NEXT:    s_ashr_i32 s10, s7, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s8, s7, 31
 ; GCN-HSA-NEXT:    s_ashr_i32 s7, s7, 16
 ; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x100000
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
@@ -6617,25 +6617,25 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s7
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s10
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s8
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s17
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s9
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s18
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s6
+; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s10
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s11
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s1
@@ -7177,12 +7177,12 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s19, s13
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s12, s7
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s20, s6, 16
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s14, s5
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s4, 16
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s16, s3
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s2, 16
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s18, s1
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s20, s6, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s2, 16
 ; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s26, s0, 16
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[28:29], s[0:1], 0x100000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[30:31], s[2:3], 0x100000
@@ -7210,30 +7210,30 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s39
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s38
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s27
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s25
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s23
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s21
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s36
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s37
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s38
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s33
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[8:11], 0 offset:80
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s34
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s35
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s2
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s3
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s27
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s25
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[8:11], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s30
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s31
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s0
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s1
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s23
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s21
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[8:11], 0 offset:16
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s28
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s29
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s18
@@ -7254,28 +7254,28 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-HSA:       ; %bb.0:
 ; GCN-HSA-NEXT:    s_load_dwordx4 s[8:11], s[8:9], 0x0
 ; GCN-HSA-NEXT:    s_add_i32 s12, s12, s17
-; GCN-HSA-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; GCN-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
+; GCN-HSA-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
+; GCN-HSA-NEXT:    s_mov_b32 s13, 0
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-HSA-NEXT:    s_load_dwordx8 s[0:7], s[10:11], 0x0
-; GCN-HSA-NEXT:    s_mov_b32 s11, 0
-; GCN-HSA-NEXT:    s_mov_b32 s15, s11
-; GCN-HSA-NEXT:    s_mov_b32 s19, s11
-; GCN-HSA-NEXT:    s_mov_b32 s23, s11
+; GCN-HSA-NEXT:    s_mov_b32 s15, s13
+; GCN-HSA-NEXT:    s_mov_b32 s17, s13
+; GCN-HSA-NEXT:    s_mov_b32 s19, s13
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-HSA-NEXT:    s_mov_b32 s10, s7
-; GCN-HSA-NEXT:    s_lshr_b32 s12, s6, 16
+; GCN-HSA-NEXT:    s_mov_b32 s12, s7
 ; GCN-HSA-NEXT:    s_mov_b32 s14, s5
-; GCN-HSA-NEXT:    s_lshr_b32 s16, s4, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s25, s1, 31
+; GCN-HSA-NEXT:    s_mov_b32 s16, s3
+; GCN-HSA-NEXT:    s_mov_b32 s18, s1
+; GCN-HSA-NEXT:    s_ashr_i32 s27, s1, 31
 ; GCN-HSA-NEXT:    s_ashr_i32 s29, s3, 31
 ; GCN-HSA-NEXT:    s_ashr_i32 s30, s3, 16
-; GCN-HSA-NEXT:    s_mov_b32 s18, s3
-; GCN-HSA-NEXT:    s_lshr_b32 s20, s2, 16
-; GCN-HSA-NEXT:    s_mov_b32 s22, s1
-; GCN-HSA-NEXT:    s_lshr_b32 s24, s0, 16
-; GCN-HSA-NEXT:    s_bfe_i64 s[26:27], s[2:3], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[10:11], 0x100000
+; GCN-HSA-NEXT:    s_lshr_b32 s20, s6, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s22, s4, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s24, s2, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s26, s0, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[2:3], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[12:13], 0x100000
 ; GCN-HSA-NEXT:    s_ashr_i32 s28, s1, 16
 ; GCN-HSA-NEXT:    s_ashr_i32 s31, s5, 31
 ; GCN-HSA-NEXT:    s_ashr_i32 s33, s5, 16
@@ -7286,36 +7286,55 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x100000
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[24:25], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[22:23], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[26:27], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[24:25], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x100000
-; GCN-HSA-NEXT:    s_add_u32 s22, s8, 0x70
-; GCN-HSA-NEXT:    s_addc_u32 s23, s9, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT:    s_add_u32 s6, s8, 0x60
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s7
-; GCN-HSA-NEXT:    s_addc_u32 s7, s9, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s22
-; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s7
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s23
-; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s6
+; GCN-HSA-NEXT:    s_add_u32 s24, s8, 0x70
+; GCN-HSA-NEXT:    s_addc_u32 s25, s9, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT:    s_add_u32 s14, s8, 0x50
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s15
+; GCN-HSA-NEXT:    s_addc_u32 s15, s9, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s25
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s15
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s35
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s34
-; GCN-HSA-NEXT:    s_add_u32 s6, s8, 0x50
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s12
-; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s13
+; GCN-HSA-NEXT:    s_add_u32 s14, s8, 48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s33
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s31
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
+; GCN-HSA-NEXT:    s_addc_u32 s15, s9, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s30
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s29
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s15
+; GCN-HSA-NEXT:    s_add_u32 s14, s8, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    s_addc_u32 s15, s9, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s18
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s19
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s28
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s27
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s15
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    s_nop 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-HSA-NEXT:    s_add_u32 s6, s8, 0x60
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-HSA-NEXT:    s_addc_u32 s7, s9, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s14
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s15
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s33
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s31
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s20
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s21
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s7
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_nop 0
@@ -7324,35 +7343,17 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-HSA-NEXT:    s_addc_u32 s5, s9, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s17
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
-; GCN-HSA-NEXT:    s_add_u32 s4, s8, 48
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT:    s_addc_u32 s5, s9, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s18
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s19
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s30
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s29
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s22
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s23
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
 ; GCN-HSA-NEXT:    s_add_u32 s4, s8, 32
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_addc_u32 s5, s9, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s26
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s27
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s20
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s21
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
-; GCN-HSA-NEXT:    s_add_u32 s4, s8, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT:    s_addc_u32 s5, s9, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s10
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s28
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s25
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s12
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s13
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s8
@@ -8323,161 +8324,158 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) %
 ; GCN-NOHSA-SI-NEXT:    s_load_dwordx4 s[16:19], s[4:5], 0x9
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NOHSA-SI-NEXT:    s_load_dwordx16 s[0:15], s[18:19], 0x0
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s53, 0
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s19, s53
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s45, 0
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s19, s45
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s21, s45
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s25, s45
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s52, s15
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s44, s15
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s18, s13
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s40, s11
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s42, s9
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s23, s1, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s25, s1, 16
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s27, s3, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s29, s3, 16
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s31, s5, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s5, 16
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s35, s7, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s37, s7, 16
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s39, s9, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s56, s9, 16
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s57, s11, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s58, s11, 16
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s59, s13, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s60, s13, 16
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s20, s11
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s24, s9
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s22, s7
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s1, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s39, s1, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s41, s3, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s43, s3, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s27, s5, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s29, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s31, s7, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s35, s7, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s37, s9, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s58, s9, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[50:51], s[24:25], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s59, s11, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s60, s11, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[52:53], s[20:21], 0x100000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[54:55], s[18:19], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s61, s15, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s62, s15, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s14, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s12, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s26, s10, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s28, s8, 16
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s50, s7
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s30, s6, 16
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s44, s5
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s34, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s61, s13, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s62, s13, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s63, s15, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s64, s15, 16
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s56, s5
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s46, s3
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s36, s2, 16
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s48, s1
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s38, s0, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s28, s14, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s34, s12, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s36, s10, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s30, s8, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s26, s6, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s38, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s40, s2, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s42, s0, 16
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[0:1], 0x100000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[20:21], s[2:3], 0x100000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x100000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[2:3], s[12:13], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[14:15], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[52:53], 0x100000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[2:3], s[44:45], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[14:15], 0x100000
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s16
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s17
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s41, s53
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s43, s53
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s51, s53
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s45, s53
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s47, s53
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s49, s53
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s14
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s15
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s12
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s13
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s54
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s55
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s23, s45
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s57, s45
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s47, s45
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s49, s45
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s54
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s55
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s52
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s53
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s50
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s51
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s2, -1
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[40:41], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[22:23], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[56:57], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[44:45], s[48:49], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[46:47], s[46:47], 0x100000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, s14
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, s15
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s64
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s63
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s62
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s61
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s60
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s59
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s58
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s37
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v18, s35
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v19, s31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v20, s16
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v21, s17
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, s29
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v23, s27
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[42:43], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[50:51], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[40:41], s[48:49], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[42:43], s[46:47], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x100000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s12
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s13
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s62
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s61
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s60
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s59
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, s58
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, s57
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v18, s10
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v20, s14
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:240
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v19, s11
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v21, s15
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:208
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(1)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s16
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:176
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, s56
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v23, s39
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s17
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:144
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s37
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s35
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s44
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s45
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s33
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s31
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s42
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s43
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s29
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s27
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[40:41], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[22:23], s[38:39], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x100000
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:176
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:144
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:112
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(5)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s46
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s47
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s43
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s41
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s40
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s41
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s25
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s44
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s45
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s39
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s9
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[38:39], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[36:37], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[34:35], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[30:31], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[28:29], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x100000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x100000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s6
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s7
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, s4
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, s5
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, s20
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v23, s21
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s24
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s25
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s25
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s12
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s13
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s10
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s11
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s9
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, s7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v20, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v21, s5
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v24, s20
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v25, s21
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s28
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s29
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s18
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s19
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s22
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s23
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s34
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s35
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v20, s26
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v21, s27
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:160
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s16
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s17
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:128
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s14
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s15
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:96
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(2)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v18, s12
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v19, s13
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v24, s10
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v25, s11
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:32
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s9
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s36
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s37
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s30
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s31
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v18, s26
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v19, s27
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, s22
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v23, s23
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v26, s16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v27, s17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s14
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s15
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
@@ -8487,175 +8485,185 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) %
 ; GCN-HSA-NEXT:    s_load_dwordx4 s[16:19], s[8:9], 0x0
 ; GCN-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
 ; GCN-HSA-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
-; GCN-HSA-NEXT:    s_mov_b32 s25, 0
-; GCN-HSA-NEXT:    s_mov_b32 s51, s25
+; GCN-HSA-NEXT:    s_mov_b32 s31, 0
+; GCN-HSA-NEXT:    s_mov_b32 s45, s31
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-HSA-NEXT:    s_load_dwordx16 s[0:15], s[18:19], 0x0
-; GCN-HSA-NEXT:    s_mov_b32 s55, s25
-; GCN-HSA-NEXT:    s_mov_b32 s31, s25
-; GCN-HSA-NEXT:    s_mov_b32 s57, s25
-; GCN-HSA-NEXT:    s_mov_b32 s61, s25
+; GCN-HSA-NEXT:    s_mov_b32 s47, s31
+; GCN-HSA-NEXT:    s_mov_b32 s49, s31
+; GCN-HSA-NEXT:    s_mov_b32 s51, s31
+; GCN-HSA-NEXT:    s_mov_b32 s53, s31
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-HSA-NEXT:    s_mov_b32 s24, s15
-; GCN-HSA-NEXT:    s_ashr_i32 s39, s3, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s40, s3, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s59, s11, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s63, s13, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s67, s13, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s71, s15, 31
-; GCN-HSA-NEXT:    s_lshr_b32 s48, s14, 16
-; GCN-HSA-NEXT:    s_mov_b32 s50, s13
-; GCN-HSA-NEXT:    s_lshr_b32 s52, s12, 16
-; GCN-HSA-NEXT:    s_mov_b32 s54, s11
-; GCN-HSA-NEXT:    s_lshr_b32 s34, s10, 16
-; GCN-HSA-NEXT:    s_mov_b32 s30, s9
-; GCN-HSA-NEXT:    s_lshr_b32 s28, s8, 16
-; GCN-HSA-NEXT:    s_mov_b32 s56, s7
-; GCN-HSA-NEXT:    s_lshr_b32 s58, s6, 16
-; GCN-HSA-NEXT:    s_mov_b32 s60, s5
+; GCN-HSA-NEXT:    s_mov_b32 s30, s15
+; GCN-HSA-NEXT:    s_ashr_i32 s43, s5, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s57, s5, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s59, s7, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s61, s7, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s63, s9, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s65, s9, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s67, s11, 31
+; GCN-HSA-NEXT:    s_mov_b32 s44, s13
+; GCN-HSA-NEXT:    s_mov_b32 s46, s11
+; GCN-HSA-NEXT:    s_mov_b32 s48, s9
+; GCN-HSA-NEXT:    s_mov_b32 s50, s7
+; GCN-HSA-NEXT:    s_mov_b32 s52, s5
+; GCN-HSA-NEXT:    s_mov_b32 s38, s3
+; GCN-HSA-NEXT:    s_mov_b32 s36, s1
+; GCN-HSA-NEXT:    s_lshr_b32 s34, s14, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s54, s12, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s56, s10, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s58, s8, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s60, s6, 16
 ; GCN-HSA-NEXT:    s_lshr_b32 s62, s4, 16
-; GCN-HSA-NEXT:    s_mov_b32 s64, s3
-; GCN-HSA-NEXT:    s_lshr_b32 s66, s2, 16
-; GCN-HSA-NEXT:    s_mov_b32 s68, s1
-; GCN-HSA-NEXT:    s_lshr_b32 s70, s0, 16
-; GCN-HSA-NEXT:    s_bfe_i64 s[18:19], s[2:3], 0x100000
-; GCN-HSA-NEXT:    s_mov_b32 s65, s25
-; GCN-HSA-NEXT:    s_mov_b32 s69, s25
-; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[24:25], 0x100000
+; GCN-HSA-NEXT:    s_lshr_b32 s64, s2, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s66, s0, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[18:19], s[4:5], 0x100000
+; GCN-HSA-NEXT:    s_mov_b32 s39, s31
+; GCN-HSA-NEXT:    s_mov_b32 s37, s31
+; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[30:31], 0x100000
 ; GCN-HSA-NEXT:    s_ashr_i32 s33, s1, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s38, s1, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s41, s5, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s42, s5, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s43, s7, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s44, s7, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s45, s9, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s46, s9, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s47, s11, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s76, s15, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s40, s1, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s41, s3, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s42, s3, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s68, s11, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s69, s13, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s70, s13, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s71, s15, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s72, s15, 16
 ; GCN-HSA-NEXT:    s_bfe_i64 s[0:1], s[0:1], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[4:5], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[22:23], s[6:7], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[26:27], s[8:9], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[36:37], s[10:11], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[72:73], s[12:13], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[74:75], s[14:15], 0x100000
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[70:71], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[68:69], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[66:67], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[64:65], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[62:63], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[60:61], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[58:59], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[56:57], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[6:7], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[22:23], s[8:9], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[10:11], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[26:27], s[12:13], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[28:29], s[14:15], 0x100000
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[66:67], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[64:65], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[62:63], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[60:61], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[58:59], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[56:57], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[30:31], s[54:55], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x100000
-; GCN-HSA-NEXT:    s_bfe_i64 s[54:55], s[54:55], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[52:53], s[52:53], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[50:51], s[50:51], 0x100000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x100000
-; GCN-HSA-NEXT:    s_add_u32 s56, s16, 0xf0
-; GCN-HSA-NEXT:    s_addc_u32 s57, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s48
-; GCN-HSA-NEXT:    s_add_u32 s48, s16, 0xe0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s49
-; GCN-HSA-NEXT:    s_addc_u32 s49, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s48
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s49
-; GCN-HSA-NEXT:    s_add_u32 s48, s16, 0xd0
-; GCN-HSA-NEXT:    s_addc_u32 s49, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s48
-; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s49
-; GCN-HSA-NEXT:    s_add_u32 s48, s16, 0xc0
-; GCN-HSA-NEXT:    s_addc_u32 s49, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s56
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s36
-; GCN-HSA-NEXT:    s_add_u32 s36, s16, 0xb0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s57
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s37
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s76
+; GCN-HSA-NEXT:    s_bfe_i64 s[46:47], s[46:47], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x100000
+; GCN-HSA-NEXT:    s_add_u32 s54, s16, 0xf0
+; GCN-HSA-NEXT:    s_addc_u32 s55, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s44
+; GCN-HSA-NEXT:    s_add_u32 s44, s16, 0xd0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s45
+; GCN-HSA-NEXT:    s_addc_u32 s45, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s44
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s45
+; GCN-HSA-NEXT:    s_add_u32 s44, s16, 0xb0
+; GCN-HSA-NEXT:    s_addc_u32 s45, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s44
+; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s45
+; GCN-HSA-NEXT:    s_add_u32 s44, s16, 0x90
+; GCN-HSA-NEXT:    s_addc_u32 s45, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s44
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s54
+; GCN-HSA-NEXT:    v_mov_b32_e32 v28, s45
+; GCN-HSA-NEXT:    s_add_u32 s44, s16, 0x70
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s55
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s72
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s71
-; GCN-HSA-NEXT:    s_addc_u32 s37, s17, 0
+; GCN-HSA-NEXT:    s_addc_u32 s45, s17, 0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[0:3]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s74
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s30
-; GCN-HSA-NEXT:    s_add_u32 s30, s16, 0xa0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s75
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s31
-; GCN-HSA-NEXT:    s_addc_u32 s31, s17, 0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[4:7]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s50
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s26
-; GCN-HSA-NEXT:    s_add_u32 s26, s16, 0x90
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s27
-; GCN-HSA-NEXT:    s_addc_u32 s27, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s26
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s27
-; GCN-HSA-NEXT:    s_add_u32 s26, s16, 0x80
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s51
-; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s67
-; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s63
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s70
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s38
+; GCN-HSA-NEXT:    s_add_u32 s38, s16, 0x50
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s69
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s39
+; GCN-HSA-NEXT:    s_addc_u32 s39, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s46
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s47
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s68
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s67
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[23:24], v[4:7]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[25:26], v[8:11]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s36
+; GCN-HSA-NEXT:    s_add_u32 s36, s16, 48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s37
+; GCN-HSA-NEXT:    s_addc_u32 s37, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s36
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s37
+; GCN-HSA-NEXT:    s_add_u32 s36, s16, 16
+; GCN-HSA-NEXT:    s_addc_u32 s37, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s28
+; GCN-HSA-NEXT:    s_add_u32 s28, s16, 0xe0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s49
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s65
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s63
+; GCN-HSA-NEXT:    v_mov_b32_e32 v29, s44
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s29
+; GCN-HSA-NEXT:    s_addc_u32 s29, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s50
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s51
+; GCN-HSA-NEXT:    v_mov_b32_e32 v30, s45
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s61
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s59
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s38
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[27:28], v[12:15]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s52
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s26
+; GCN-HSA-NEXT:    s_add_u32 s26, s16, 0xc0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s53
+; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s57
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s43
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s39
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s42
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s41
+; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s36
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[29:30], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s27
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s28
 ; GCN-HSA-NEXT:    s_addc_u32 s27, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v28, s48
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[8:11]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s72
-; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s24
-; GCN-HSA-NEXT:    s_add_u32 s24, s16, 0x70
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s73
-; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s52
-; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s53
-; GCN-HSA-NEXT:    v_mov_b32_e32 v29, s49
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s25
-; GCN-HSA-NEXT:    s_addc_u32 s25, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v30, s36
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[28:29], v[12:15]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s54
-; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s14
-; GCN-HSA-NEXT:    s_add_u32 s14, s16, 0x60
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s55
-; GCN-HSA-NEXT:    v_mov_b32_e32 v31, s37
-; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s59
-; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s47
-; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s30
-; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s15
-; GCN-HSA-NEXT:    s_addc_u32 s15, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s34
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s35
-; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s31
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s46
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s45
-; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s26
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[30:31], v[16:19]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s27
-; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s24
-; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s15
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s28
-; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s29
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s26
+; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s37
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s40
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s33
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[10:11], v[20:23]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s22
-; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s44
-; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s43
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s23
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s25
-; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s34
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s35
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s29
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s30
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s31
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s27
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[0:3]
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[4:7]
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[8:11]
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[12:15]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s12
-; GCN-HSA-NEXT:    s_add_u32 s12, s16, 0x50
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s14
+; GCN-HSA-NEXT:    s_add_u32 s14, s16, 0xa0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s15
+; GCN-HSA-NEXT:    s_addc_u32 s15, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s25
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s15
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    s_nop 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s12
+; GCN-HSA-NEXT:    s_add_u32 s12, s16, 0x80
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s13
 ; GCN-HSA-NEXT:    s_addc_u32 s13, s17, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s12
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s42
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s41
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s22
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s23
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s13
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_nop 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s10
-; GCN-HSA-NEXT:    s_add_u32 s10, s16, 64
+; GCN-HSA-NEXT:    s_add_u32 s10, s16, 0x60
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s11
 ; GCN-HSA-NEXT:    s_addc_u32 s11, s17, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s10
@@ -8664,40 +8672,30 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) %
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s11
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_nop 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-HSA-NEXT:    s_add_u32 s8, s16, 48
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-HSA-NEXT:    s_add_u32 s8, s16, 64
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s9
 ; GCN-HSA-NEXT:    s_addc_u32 s9, s17, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s40
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s39
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s18
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s19
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s9
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_nop 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s16, 32
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT:    s_addc_u32 s3, s17, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s6
-; GCN-HSA-NEXT:    s_add_u32 s6, s16, 32
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s7
-; GCN-HSA-NEXT:    s_addc_u32 s7, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s18
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s19
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s7
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT:    s_nop 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-HSA-NEXT:    s_add_u32 s4, s16, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-HSA-NEXT:    s_addc_u32 s5, s17, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s38
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s33
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s16
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s5
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s17
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
index 51acb275ee8c5..3d2949c5a192d 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -6401,41 +6401,41 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
 ; GFX6-NOHSA-NEXT:    s_mov_b32 s11, s7
 ; GFX6-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NOHSA-NEXT:    s_lshr_b32 s6, s5, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s8, s5, 8
+; GFX6-NOHSA-NEXT:    s_mov_b32 s10, s5
 ; GFX6-NOHSA-NEXT:    s_lshr_b32 s12, s4, 16
 ; GFX6-NOHSA-NEXT:    s_lshr_b32 s14, s4, 24
 ; GFX6-NOHSA-NEXT:    s_lshr_b32 s16, s4, 8
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s8, s5, 8
-; GFX6-NOHSA-NEXT:    s_mov_b32 s10, s5
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[18:19], s[4:5], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
 ; GFX6-NOHSA-NEXT:    s_ashr_i32 s17, s5, 31
 ; GFX6-NOHSA-NEXT:    s_ashr_i32 s20, s5, 24
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[8:9], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[8:9], s[16:17], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[16:17], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s20
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s17
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s18
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s19
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v8, s10
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v9, s11
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s10
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s11
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v8, s18
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v9, s19
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
-; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s8
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s9
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(1)
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s12
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s13
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s14
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s15
 ; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s8
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s9
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s4
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s5
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0
 ; GFX6-NOHSA-NEXT:    s_endpgm
 ;
 ; GFX7-HSA-LABEL: constant_sextload_v8i8_to_v8i64:
@@ -6451,17 +6451,17 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
 ; GFX7-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
 ; GFX7-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7-HSA-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s6, s3, 8
+; GFX7-HSA-NEXT:    s_mov_b32 s8, s3
 ; GFX7-HSA-NEXT:    s_lshr_b32 s10, s2, 16
 ; GFX7-HSA-NEXT:    s_lshr_b32 s12, s2, 24
 ; GFX7-HSA-NEXT:    s_lshr_b32 s14, s2, 8
-; GFX7-HSA-NEXT:    s_lshr_b32 s6, s3, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s8, s3
-; GFX7-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[16:17], s[2:3], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
 ; GFX7-HSA-NEXT:    s_ashr_i32 s18, s3, 31
 ; GFX7-HSA-NEXT:    s_ashr_i32 s19, s3, 24
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[2:3], s[4:5], 0x80000
@@ -6471,12 +6471,21 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
 ; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 16
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 32
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s19
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s18
 ; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
+; GFX7-HSA-NEXT:    s_add_u32 s2, s0, 16
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s8
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s9
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s6
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s7
+; GFX7-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s11
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s12
@@ -6484,19 +6493,10 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s0
-; GFX7-HSA-NEXT:    s_add_u32 s0, s0, 32
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s16
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s17
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s14
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s15
-; GFX7-HSA-NEXT:    s_addc_u32 s1, s1, 0
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s8
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s9
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s6
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s7
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    s_endpgm
@@ -6510,16 +6510,16 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
 ; GFX8-NOHSA-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
 ; GFX8-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NOHSA-NEXT:    s_lshr_b32 s4, s3, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s8, s2, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s10, s2, 24
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s12, s2, 8
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s14, s3, 8
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s8, s3, 8
 ; GFX8-NOHSA-NEXT:    s_mov_b32 s6, s3
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s10, s2, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s12, s2, 24
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s14, s2, 8
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[16:17], s[2:3], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
 ; GFX8-NOHSA-NEXT:    s_ashr_i32 s18, s3, 31
 ; GFX8-NOHSA-NEXT:    s_ashr_i32 s19, s3, 24
@@ -6530,30 +6530,30 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
-; GFX8-NOHSA-NEXT:    s_add_u32 s2, s0, 16
+; GFX8-NOHSA-NEXT:    s_add_u32 s2, s0, 32
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s19
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s18
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s8
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s9
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s10
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s11
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
+; GFX8-NOHSA-NEXT:    s_add_u32 s2, s0, 16
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s8
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s9
+; GFX8-NOHSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s0
-; GFX8-NOHSA-NEXT:    s_add_u32 s0, s0, 32
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s16
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s17
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s10
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s11
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s12
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s13
-; GFX8-NOHSA-NEXT:    s_addc_u32 s1, s1, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s16
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s17
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s14
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s15
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s0
@@ -6626,34 +6626,34 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
 ; GFX12-NEXT:    s_load_b64 s[2:3], s[2:3], 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_lshr_b32 s4, s3, 16
-; GFX12-NEXT:    s_lshr_b32 s8, s2, 16
-; GFX12-NEXT:    s_lshr_b32 s10, s2, 24
-; GFX12-NEXT:    s_lshr_b32 s12, s2, 8
-; GFX12-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GFX12-NEXT:    s_lshr_b32 s14, s3, 8
+; GFX12-NEXT:    s_lshr_b32 s8, s3, 8
 ; GFX12-NEXT:    s_mov_b32 s6, s3
+; GFX12-NEXT:    s_lshr_b32 s10, s2, 16
+; GFX12-NEXT:    s_lshr_b32 s12, s2, 24
 ; GFX12-NEXT:    s_bfe_i64 s[16:17], s[2:3], 0x80000
 ; GFX12-NEXT:    s_ashr_i32 s15, s3, 31
 ; GFX12-NEXT:    s_ashr_i32 s18, s3, 24
-; GFX12-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
+; GFX12-NEXT:    s_lshr_b32 s14, s2, 8
+; GFX12-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
 ; GFX12-NEXT:    v_dual_mov_b32 v16, 0 :: v_dual_mov_b32 v3, s15
-; GFX12-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v9, s9
-; GFX12-NEXT:    v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v11, s11
-; GFX12-NEXT:    v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v7, s13
-; GFX12-NEXT:    s_bfe_i64 s[2:3], s[6:7], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[6:7], s[14:15], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
 ; GFX12-NEXT:    v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v5, s17
 ; GFX12-NEXT:    v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v1, s5
-; GFX12-NEXT:    v_dual_mov_b32 v6, s12 :: v_dual_mov_b32 v13, s3
-; GFX12-NEXT:    v_dual_mov_b32 v12, s2 :: v_dual_mov_b32 v15, s7
-; GFX12-NEXT:    v_mov_b32_e32 v14, s6
+; GFX12-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v9, s7
+; GFX12-NEXT:    s_bfe_i64 s[2:3], s[14:15], 0x80000
+; GFX12-NEXT:    v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v11, s9
+; GFX12-NEXT:    v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v13, s11
+; GFX12-NEXT:    v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v15, s13
+; GFX12-NEXT:    v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v7, s3
+; GFX12-NEXT:    v_mov_b32_e32 v6, s2
 ; GFX12-NEXT:    s_clause 0x3
-; GFX12-NEXT:    global_store_b128 v16, v[8:11], s[0:1] offset:16
-; GFX12-NEXT:    global_store_b128 v16, v[4:7], s[0:1]
 ; GFX12-NEXT:    global_store_b128 v16, v[0:3], s[0:1] offset:48
-; GFX12-NEXT:    global_store_b128 v16, v[12:15], s[0:1] offset:32
+; GFX12-NEXT:    global_store_b128 v16, v[8:11], s[0:1] offset:32
+; GFX12-NEXT:    global_store_b128 v16, v[12:15], s[0:1] offset:16
+; GFX12-NEXT:    global_store_b128 v16, v[4:7], s[0:1]
 ; GFX12-NEXT:    s_endpgm
   %load = load <8 x i8>, ptr addrspace(4) %in
   %ext = sext <8 x i8> %load to <8 x i64>
@@ -7042,133 +7042,130 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX6-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NOHSA-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
 ; GFX6-NOHSA-NEXT:    s_mov_b32 s3, 0xf000
-; GFX6-NOHSA-NEXT:    s_mov_b32 s13, 0
+; GFX6-NOHSA-NEXT:    s_mov_b32 s11, 0
 ; GFX6-NOHSA-NEXT:    s_mov_b32 s2, -1
-; GFX6-NOHSA-NEXT:    s_mov_b32 s11, s13
-; GFX6-NOHSA-NEXT:    s_mov_b32 s15, s13
-; GFX6-NOHSA-NEXT:    s_mov_b32 s17, s13
-; GFX6-NOHSA-NEXT:    s_mov_b32 s19, s13
-; GFX6-NOHSA-NEXT:    s_mov_b32 s9, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s13, s11
+; GFX6-NOHSA-NEXT:    s_mov_b32 s15, s11
+; GFX6-NOHSA-NEXT:    s_mov_b32 s17, s11
+; GFX6-NOHSA-NEXT:    s_mov_b32 s19, s11
+; GFX6-NOHSA-NEXT:    s_mov_b32 s21, s11
 ; GFX6-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s20, s6, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s22, s6, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s24, s6, 8
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s26, s4, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s28, s4, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s30, s4, 8
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s12, s7, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s10, s7, 8
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s10, s7, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s12, s7, 8
 ; GFX6-NOHSA-NEXT:    s_mov_b32 s14, s7
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s22, s6, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s24, s6, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s26, s6, 8
 ; GFX6-NOHSA-NEXT:    s_lshr_b32 s16, s5, 16
 ; GFX6-NOHSA-NEXT:    s_lshr_b32 s18, s5, 8
-; GFX6-NOHSA-NEXT:    s_mov_b32 s8, s5
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[34:35], s[4:5], 0x80000
+; GFX6-NOHSA-NEXT:    s_mov_b32 s20, s5
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s28, s4, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s30, s4, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s34, s4, 8
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[8:9], s[4:5], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[36:37], s[6:7], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s29, s5, 31
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s31, s5, 24
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s31, s5, 31
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s33, s5, 24
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s33, s7, 31
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s35, s7, 31
 ; GFX6-NOHSA-NEXT:    s_ashr_i32 s38, s7, 24
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[18:19], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[16:17], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[16:17], s[30:31], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[18:19], s[28:29], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[34:35], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[30:31], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s36
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s37
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s34
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s35
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v8, s38
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v9, s33
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s14
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s15
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s20
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s21
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v14, s22
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v15, s23
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80
-; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v14, s31
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v15, s29
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s24
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s25
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s26
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s27
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s18
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s19
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s38
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s35
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s14
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s15
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v8, s36
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v9, s37
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s33
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s31
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v14, s20
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v15, s21
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s10
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s11
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s9
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s16
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s17
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
-; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s12
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s13
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s10
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s11
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s22
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s23
+; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s24
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s25
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:80
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s26
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s27
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64
+; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s16
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s17
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v16, s18
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v17, s19
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s28
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s29
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s6
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s7
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s5
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX6-NOHSA-NEXT:    s_endpgm
 ;
 ; GFX7-HSA-LABEL: constant_sextload_v16i8_to_v16i64:
 ; GFX7-HSA:       ; %bb.0:
 ; GFX7-HSA-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
 ; GFX7-HSA-NEXT:    s_add_i32 s12, s12, s17
-; GFX7-HSA-NEXT:    s_mov_b32 s19, 0
+; GFX7-HSA-NEXT:    s_mov_b32 s9, 0
+; GFX7-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
 ; GFX7-HSA-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s21, s19
 ; GFX7-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7-HSA-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
-; GFX7-HSA-NEXT:    s_mov_b32 s23, s19
-; GFX7-HSA-NEXT:    s_mov_b32 s25, s19
-; GFX7-HSA-NEXT:    s_mov_b32 s27, s19
-; GFX7-HSA-NEXT:    s_mov_b32 s29, s19
+; GFX7-HSA-NEXT:    s_mov_b32 s11, s9
+; GFX7-HSA-NEXT:    s_mov_b32 s13, s9
+; GFX7-HSA-NEXT:    s_mov_b32 s21, s9
+; GFX7-HSA-NEXT:    s_mov_b32 s23, s9
 ; GFX7-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-HSA-NEXT:    s_lshr_b32 s2, s6, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s8, s6, 24
-; GFX7-HSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GFX7-HSA-NEXT:    s_lshr_b32 s10, s6, 8
-; GFX7-HSA-NEXT:    s_lshr_b32 s12, s4, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s14, s4, 24
-; GFX7-HSA-NEXT:    s_lshr_b32 s16, s4, 8
-; GFX7-HSA-NEXT:    s_lshr_b32 s18, s7, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s20, s7, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s22, s7
-; GFX7-HSA-NEXT:    s_lshr_b32 s24, s5, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s26, s5, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s28, s5
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GFX7-HSA-NEXT:    s_bfe_i64 s[2:3], s[8:9], 0x80000
-; GFX7-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
-; GFX7-HSA-NEXT:    s_ashr_i32 s33, s5, 31
-; GFX7-HSA-NEXT:    s_ashr_i32 s34, s5, 24
-; GFX7-HSA-NEXT:    s_ashr_i32 s35, s7, 31
+; GFX7-HSA-NEXT:    s_lshr_b32 s8, s7, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s10, s7, 8
+; GFX7-HSA-NEXT:    s_mov_b32 s12, s7
+; GFX7-HSA-NEXT:    s_lshr_b32 s14, s6, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s16, s6, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s18, s6, 8
+; GFX7-HSA-NEXT:    s_lshr_b32 s20, s5, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s22, s5, 8
+; GFX7-HSA-NEXT:    s_ashr_i32 s29, s5, 31
+; GFX7-HSA-NEXT:    s_ashr_i32 s31, s5, 24
+; GFX7-HSA-NEXT:    s_mov_b32 s24, s5
+; GFX7-HSA-NEXT:    s_lshr_b32 s26, s4, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s28, s4, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s30, s4, 8
+; GFX7-HSA-NEXT:    s_bfe_i64 s[2:3], s[4:5], 0x80000
+; GFX7-HSA-NEXT:    s_mov_b32 s25, s9
+; GFX7-HSA-NEXT:    s_bfe_i64 s[4:5], s[8:9], 0x80000
+; GFX7-HSA-NEXT:    s_ashr_i32 s33, s7, 31
 ; GFX7-HSA-NEXT:    s_ashr_i32 s36, s7, 24
-; GFX7-HSA-NEXT:    s_bfe_i64 s[30:31], s[4:5], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s2
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s3
-; GFX7-HSA-NEXT:    s_bfe_i64 s[4:5], s[28:29], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[2:3], s[26:27], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[8:9], s[24:25], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[34:35], s[6:7], 0x80000
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s5
+; GFX7-HSA-NEXT:    s_bfe_i64 s[4:5], s[30:31], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[6:7], s[28:29], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[8:9], s[26:27], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
@@ -7176,70 +7173,73 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GFX7-HSA-NEXT:    s_add_u32 s24, s0, 0x50
-; GFX7-HSA-NEXT:    s_addc_u32 s25, s1, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT:    s_add_u32 s6, s0, 64
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT:    s_addc_u32 s7, s1, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v8, s24
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v11, s7
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v9, s25
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v10, s6
-; GFX7-HSA-NEXT:    s_add_u32 s6, s0, 16
+; GFX7-HSA-NEXT:    s_add_u32 s26, s0, 0x70
+; GFX7-HSA-NEXT:    s_addc_u32 s27, s1, 0
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v6, s10
+; GFX7-HSA-NEXT:    s_add_u32 s10, s0, 0x60
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v8, s26
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v7, s11
+; GFX7-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v10, s10
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v9, s27
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s12
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s13
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v11, s11
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s36
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s33
+; GFX7-HSA-NEXT:    s_add_u32 s10, s0, 0x50
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
-; GFX7-HSA-NEXT:    s_addc_u32 s7, s1, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s12
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s13
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s14
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s15
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s30
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s31
+; GFX7-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s10
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s14
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s15
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s16
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s17
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s0
-; GFX7-HSA-NEXT:    s_add_u32 s6, s0, 0x70
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s11
+; GFX7-HSA-NEXT:    s_add_u32 s10, s0, 64
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT:    s_addc_u32 s7, s1, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s18
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s19
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s36
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s35
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT:    s_add_u32 s6, s0, 0x60
+; GFX7-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s10
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s34
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s35
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s18
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s19
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s11
+; GFX7-HSA-NEXT:    s_add_u32 s10, s0, 48
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT:    s_addc_u32 s7, s1, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT:    s_add_u32 s6, s0, 48
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s22
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s23
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s20
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s21
-; GFX7-HSA-NEXT:    s_addc_u32 s7, s1, 0
+; GFX7-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s10
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s20
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s21
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s31
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s29
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s11
+; GFX7-HSA-NEXT:    s_add_u32 s10, s0, 32
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX7-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s10
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s24
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s25
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s22
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s23
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s11
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX7-HSA-NEXT:    s_nop 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s6
+; GFX7-HSA-NEXT:    s_add_u32 s6, s0, 16
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s7
+; GFX7-HSA-NEXT:    s_addc_u32 s7, s1, 0
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT:    s_add_u32 s0, s0, 32
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s9
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s34
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s33
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT:    s_addc_u32 s1, s1, 0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s4
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s5
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s2
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s3
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s2
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s4
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s5
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    s_endpgm
@@ -7247,94 +7247,77 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-LABEL: constant_sextload_v16i8_to_v16i64:
 ; GFX8-NOHSA:       ; %bb.0:
 ; GFX8-NOHSA-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NOHSA-NEXT:    s_mov_b32 s11, 0
-; GFX8-NOHSA-NEXT:    s_mov_b32 s27, s11
-; GFX8-NOHSA-NEXT:    s_mov_b32 s29, s11
+; GFX8-NOHSA-NEXT:    s_mov_b32 s19, 0
+; GFX8-NOHSA-NEXT:    s_mov_b32 s21, s19
+; GFX8-NOHSA-NEXT:    s_mov_b32 s31, s19
+; GFX8-NOHSA-NEXT:    s_mov_b32 s15, s19
 ; GFX8-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NOHSA-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
-; GFX8-NOHSA-NEXT:    s_mov_b32 s3, s11
 ; GFX8-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s12, s6, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s14, s6, 24
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s16, s6, 8
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s18, s4, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s20, s4, 24
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s22, s4, 8
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s10, s7, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s24, s7, 8
-; GFX8-NOHSA-NEXT:    s_mov_b32 s26, s7
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s28, s5, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s8, s5, 8
-; GFX8-NOHSA-NEXT:    s_mov_b32 s2, s5
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s23, s5, 31
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[30:31], s[4:5], 0x80000
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s18, s7, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s22, s7, 8
+; GFX8-NOHSA-NEXT:    s_mov_b32 s20, s7
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s24, s6, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s26, s6, 24
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s28, s6, 8
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s30, s5, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s16, s5, 8
+; GFX8-NOHSA-NEXT:    s_mov_b32 s14, s5
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s12, s4, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s10, s4, 24
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s8, s4, 8
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[2:3], s[4:5], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[34:35], s[6:7], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s33, s5, 24
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[28:29], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s33, s5, 31
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s36, s5, 24
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[30:31], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s28, s7, 31
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s29, s7, 24
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[10:11], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[22:23], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s12
-; GFX8-NOHSA-NEXT:    s_add_u32 s12, s0, 0x50
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s13
-; GFX8-NOHSA-NEXT:    s_addc_u32 s13, s1, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s12
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s14
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s15
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s13
-; GFX8-NOHSA-NEXT:    s_add_u32 s12, s0, 64
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_addc_u32 s13, s1, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s12
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s34
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s35
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s16
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s17
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s13
-; GFX8-NOHSA-NEXT:    s_add_u32 s12, s0, 16
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_addc_u32 s13, s1, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s12
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s18
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s19
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s20
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s21
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s13
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s30
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s31
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s10
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s11
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s0
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_nop 0
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s30, s7, 31
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s31, s7, 24
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[18:19], 0x80000
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX8-NOHSA-NEXT:    s_add_u32 s6, s0, 0x70
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s7, s1, 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s29
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s28
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s31
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s30
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
 ; GFX8-NOHSA-NEXT:    s_add_u32 s6, s0, 0x60
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s7, s1, 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s26
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s27
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s24
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s25
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s20
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s21
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s22
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s23
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
+; GFX8-NOHSA-NEXT:    s_add_u32 s6, s0, 0x50
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s24
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s25
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s26
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s27
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
+; GFX8-NOHSA-NEXT:    s_add_u32 s6, s0, 64
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s34
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s35
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s28
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s29
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    s_nop 0
@@ -7343,11 +7326,27 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s5, s1, 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s4
-; GFX8-NOHSA-NEXT:    s_add_u32 s0, s0, 32
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s33
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s23
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s36
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s33
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s5
+; GFX8-NOHSA-NEXT:    s_add_u32 s4, s0, 32
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    s_addc_u32 s5, s1, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s4
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s14
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s15
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s16
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s17
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s5
+; GFX8-NOHSA-NEXT:    s_add_u32 s4, s0, 16
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    s_addc_u32 s5, s1, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s4
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s12
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s13
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s10
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s11
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s5
-; GFX8-NOHSA-NEXT:    s_addc_u32 s1, s1, 0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s2
@@ -7460,73 +7459,72 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
 ; GFX12-LABEL: constant_sextload_v16i8_to_v16i64:
 ; GFX12:       ; %bb.0:
 ; GFX12-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT:    s_mov_b32 s9, 0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    s_mov_b32 s11, s9
+; GFX12-NEXT:    s_mov_b32 s13, s9
+; GFX12-NEXT:    s_mov_b32 s15, s9
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_load_b128 s[4:7], s[2:3], 0x0
-; GFX12-NEXT:    s_mov_b32 s3, 0
-; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT:    s_mov_b32 s9, s3
-; GFX12-NEXT:    s_mov_b32 s13, s3
-; GFX12-NEXT:    s_mov_b32 s11, s3
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    s_lshr_b32 s14, s6, 16
-; GFX12-NEXT:    s_lshr_b32 s16, s6, 24
-; GFX12-NEXT:    s_lshr_b32 s18, s6, 8
-; GFX12-NEXT:    s_bfe_i64 s[34:35], s[6:7], 0x80000
+; GFX12-NEXT:    s_lshr_b32 s8, s7, 16
+; GFX12-NEXT:    s_lshr_b32 s16, s7, 8
+; GFX12-NEXT:    s_mov_b32 s10, s7
+; GFX12-NEXT:    s_ashr_i32 s33, s7, 31
+; GFX12-NEXT:    s_ashr_i32 s36, s7, 24
+; GFX12-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
+; GFX12-NEXT:    s_lshr_b32 s18, s6, 16
+; GFX12-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
 ; GFX12-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GFX12-NEXT:    s_lshr_b32 s20, s4, 16
+; GFX12-NEXT:    v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v3, s33
+; GFX12-NEXT:    s_lshr_b32 s20, s6, 24
+; GFX12-NEXT:    v_dual_mov_b32 v2, s36 :: v_dual_mov_b32 v1, s9
+; GFX12-NEXT:    v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v5, s11
+; GFX12-NEXT:    s_lshr_b32 s22, s6, 8
 ; GFX12-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s35
-; GFX12-NEXT:    s_lshr_b32 s22, s4, 24
-; GFX12-NEXT:    v_dual_mov_b32 v0, s34 :: v_dual_mov_b32 v5, s15
-; GFX12-NEXT:    v_dual_mov_b32 v4, s14 :: v_dual_mov_b32 v7, s17
-; GFX12-NEXT:    v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v3, s19
-; GFX12-NEXT:    s_lshr_b32 s24, s4, 8
+; GFX12-NEXT:    v_dual_mov_b32 v4, s10 :: v_dual_mov_b32 v7, s17
+; GFX12-NEXT:    v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v9, s19
+; GFX12-NEXT:    s_lshr_b32 s12, s5, 16
 ; GFX12-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT:    v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v9, s21
-; GFX12-NEXT:    s_lshr_b32 s2, s7, 16
+; GFX12-NEXT:    s_lshr_b32 s24, s5, 8
+; GFX12-NEXT:    s_mov_b32 s14, s5
+; GFX12-NEXT:    s_bfe_i64 s[34:35], s[6:7], 0x80000
 ; GFX12-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
-; GFX12-NEXT:    s_lshr_b32 s26, s7, 8
-; GFX12-NEXT:    s_mov_b32 s8, s7
-; GFX12-NEXT:    s_mov_b32 s12, s5
-; GFX12-NEXT:    s_bfe_i64 s[30:31], s[4:5], 0x80000
-; GFX12-NEXT:    s_ashr_i32 s27, s5, 31
+; GFX12-NEXT:    s_lshr_b32 s26, s4, 16
+; GFX12-NEXT:    s_lshr_b32 s28, s4, 24
+; GFX12-NEXT:    s_ashr_i32 s29, s5, 31
+; GFX12-NEXT:    s_ashr_i32 s31, s5, 24
+; GFX12-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX12-NEXT:    v_dual_mov_b32 v8, s18 :: v_dual_mov_b32 v11, s21
+; GFX12-NEXT:    v_mov_b32_e32 v10, s20
+; GFX12-NEXT:    s_lshr_b32 s30, s4, 8
+; GFX12-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GFX12-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX12-NEXT:    s_lshr_b32 s10, s5, 16
-; GFX12-NEXT:    s_ashr_i32 s33, s7, 31
-; GFX12-NEXT:    s_ashr_i32 s36, s7, 24
-; GFX12-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX12-NEXT:    v_mov_b32_e32 v10, s22
-; GFX12-NEXT:    s_lshr_b32 s28, s5, 8
-; GFX12-NEXT:    s_ashr_i32 s29, s5, 24
-; GFX12-NEXT:    s_bfe_i64 s[4:5], s[12:13], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[12:13], s[26:27], 0x80000
 ; GFX12-NEXT:    s_clause 0x1
-; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:80
-; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:64
-; GFX12-NEXT:    v_dual_mov_b32 v0, s30 :: v_dual_mov_b32 v3, s25
-; GFX12-NEXT:    v_dual_mov_b32 v1, s31 :: v_dual_mov_b32 v2, s24
-; GFX12-NEXT:    v_mov_b32_e32 v5, s3
-; GFX12-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v7, s33
-; GFX12-NEXT:    v_dual_mov_b32 v6, s36 :: v_dual_mov_b32 v13, s9
+; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:112
+; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:96
+; GFX12-NEXT:    v_dual_mov_b32 v0, s34 :: v_dual_mov_b32 v3, s23
+; GFX12-NEXT:    v_dual_mov_b32 v1, s35 :: v_dual_mov_b32 v2, s22
+; GFX12-NEXT:    v_mov_b32_e32 v5, s13
 ; GFX12-NEXT:    s_bfe_i64 s[6:7], s[28:29], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v12, s8 :: v_dual_mov_b32 v15, s13
-; GFX12-NEXT:    v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v17, s11
-; GFX12-NEXT:    v_dual_mov_b32 v16, s10 :: v_dual_mov_b32 v19, s27
-; GFX12-NEXT:    v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v21, s5
-; GFX12-NEXT:    v_dual_mov_b32 v20, s4 :: v_dual_mov_b32 v23, s7
-; GFX12-NEXT:    v_mov_b32_e32 v22, s6
+; GFX12-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GFX12-NEXT:    v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v7, s29
+; GFX12-NEXT:    v_dual_mov_b32 v6, s31 :: v_dual_mov_b32 v13, s15
+; GFX12-NEXT:    s_bfe_i64 s[2:3], s[4:5], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[4:5], s[30:31], 0x80000
+; GFX12-NEXT:    v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v15, s25
+; GFX12-NEXT:    v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v17, s27
+; GFX12-NEXT:    v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v19, s7
+; GFX12-NEXT:    v_dual_mov_b32 v18, s6 :: v_dual_mov_b32 v21, s3
+; GFX12-NEXT:    v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v23, s5
+; GFX12-NEXT:    v_mov_b32_e32 v22, s4
 ; GFX12-NEXT:    s_clause 0x5
-; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[0:1] offset:16
-; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1]
-; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:112
-; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[0:1] offset:96
-; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[0:1] offset:48
-; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[0:1] offset:32
+; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[0:1] offset:80
+; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[0:1] offset:64
+; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[0:1] offset:48
+; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[0:1] offset:32
+; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[0:1] offset:16
+; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[0:1]
 ; GFX12-NEXT:    s_endpgm
   %load = load <16 x i8>, ptr addrspace(4) %in
   %ext = sext <16 x i8> %load to <16 x i64>
@@ -8237,494 +8235,484 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX6-NOHSA-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x9
 ; GFX6-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NOHSA-NEXT:    s_load_dwordx8 s[0:7], s[10:11], 0x0
-; GFX6-NOHSA-NEXT:    s_mov_b32 s11, 0
-; GFX6-NOHSA-NEXT:    s_mov_b32 s29, s11
+; GFX6-NOHSA-NEXT:    s_mov_b32 s13, 0
+; GFX6-NOHSA-NEXT:    s_mov_b32 s27, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s11, s13
 ; GFX6-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s30, s6, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s34, s6, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s20, s6, 8
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s26, s4, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s22, s4, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s24, s4, 8
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s12, s2, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s14, s2, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s16, s2, 8
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s18, s0, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s10, s7, 16
-; GFX6-NOHSA-NEXT:    s_mov_b32 s28, s7
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s13, s1, 31
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s15, s1, 24
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s17, s3, 31
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s19, s3, 24
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s21, s5, 31
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s23, s5, 24
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[48:49], s[28:29], 0x80000
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s25, s7, 31
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s12, s7, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s14, s7, 8
+; GFX6-NOHSA-NEXT:    s_mov_b32 s26, s7
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s24, s6, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s18, s6, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s22, s6, 8
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s20, s5, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s16, s5, 8
+; GFX6-NOHSA-NEXT:    s_mov_b32 s10, s5
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s25, s1, 31
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s33, s1, 24
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s19, s3, 31
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s15, s3, 24
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s23, s5, 31
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s17, s5, 24
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[58:59], s[26:27], 0x80000
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s21, s7, 31
 ; GFX6-NOHSA-NEXT:    s_ashr_i32 s27, s7, 24
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[52:53], s[34:35], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[54:55], s[30:31], 0x80000
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s28, s0, 24
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s30, s0, 8
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s34, s7, 8
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s36, s5, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s38, s5, 8
-; GFX6-NOHSA-NEXT:    s_mov_b32 s44, s5
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s40, s3, 16
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s42, s3, 8
-; GFX6-NOHSA-NEXT:    s_mov_b32 s50, s3
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s46, s1, 16
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[56:57], s[0:1], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[58:59], s[4:5], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[60:61], s[6:7], 0x80000
-; GFX6-NOHSA-NEXT:    s_lshr_b32 s4, s1, 8
-; GFX6-NOHSA-NEXT:    s_mov_b32 s6, s1
-; GFX6-NOHSA-NEXT:    s_mov_b32 s0, s8
-; GFX6-NOHSA-NEXT:    s_mov_b32 s1, s9
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s60
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s61
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s58
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s59
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v8, s2
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v9, s3
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s56
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s57
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v16, s48
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v17, s49
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v18, s54
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v19, s55
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v20, s52
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v21, s53
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v22, s27
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v23, s25
-; GFX6-NOHSA-NEXT:    s_mov_b32 s3, 0xf000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[8:9], s[26:27], 0x80000
-; GFX6-NOHSA-NEXT:    s_mov_b32 s2, -1
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v24, s8
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:208
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[26:27], s[20:21], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[48:49], s[22:23], 0x80000
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s26
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s27
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v25, s9
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v26, s48
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v27, s49
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s24
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s25
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:144
-; GFX6-NOHSA-NEXT:    s_mov_b32 s35, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s37, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s39, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s45, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s41, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s43, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s51, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s47, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s5, s11
-; GFX6-NOHSA-NEXT:    s_mov_b32 s7, s11
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[24:25], s[6:7], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[26:27], s[50:51], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x80000
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s42, s4, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s38, s4, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s26, s4, 8
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s28, s3, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s30, s3, 8
+; GFX6-NOHSA-NEXT:    s_mov_b32 s52, s3
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s34, s2, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s36, s2, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s40, s2, 8
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s44, s1, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s46, s1, 8
+; GFX6-NOHSA-NEXT:    s_mov_b32 s56, s1
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s48, s0, 16
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s50, s0, 24
+; GFX6-NOHSA-NEXT:    s_lshr_b32 s54, s0, 8
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[0:1], s[0:1], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[46:47], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[8:9], s[42:43], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x80000
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s58
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s59
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v8, s6
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v9, s7
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s10
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s11
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s5
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v18, s27
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v19, s21
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s17
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v14, s15
+; GFX6-NOHSA-NEXT:    s_mov_b32 s11, 0xf000
+; GFX6-NOHSA-NEXT:    s_mov_b32 s15, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s21, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s17, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s29, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s31, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s53, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s45, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s47, s13
+; GFX6-NOHSA-NEXT:    s_mov_b32 s57, s13
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[12:13], 0x80000
+; GFX6-NOHSA-NEXT:    s_mov_b32 s10, -1
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s23
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v15, s19
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v16, s4
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v17, s5
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[16:19], off, s[8:11], 0 offset:240
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[56:57], s[56:57], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[52:53], s[52:53], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[58:59], s[24:25], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[60:61], s[14:15], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[62:63], s[22:23], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[64:65], s[18:19], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[66:67], s[20:21], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[68:69], s[16:17], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[54:55], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[50:51], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[12:13], s[48:49], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[14:15], s[46:47], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[16:17], s[44:45], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[18:19], s[40:41], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[20:21], s[36:37], 0x80000
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[22:23], s[34:35], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
 ; GFX6-NOHSA-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[42:43], s[18:19], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[46:47], s[16:17], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[48:49], s[14:15], 0x80000
-; GFX6-NOHSA-NEXT:    s_bfe_i64 s[50:51], s[12:13], 0x80000
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:128
-; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(2)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s50
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s51
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s48
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s49
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GFX6-NOHSA-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s60
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s61
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[10:13], off, s[8:11], 0 offset:224
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s23
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s21
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s46
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s47
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s42
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s43
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s28
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s29
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s58
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s59
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s64
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s65
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[10:13], off, s[8:11], 0 offset:208
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v16, s52
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v17, s53
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s44
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s45
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v14, s30
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v15, s31
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v8, s19
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v9, s17
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v20, s10
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v21, s11
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:240
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s26
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s27
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v18, s34
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v19, s35
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:224
-; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(2)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v14, s15
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v15, s13
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s36
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s37
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s62
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s63
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[8:11], off, s[8:11], 0 offset:192
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s24
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s25
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s38
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s39
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:160
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v8, s2
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v9, s3
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s66
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s67
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:176
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s40
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s41
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s8
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s9
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v6, s33
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v7, s25
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s68
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s69
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[2:5], off, s[8:11], 0 offset:160
+; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s42
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s43
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s38
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s39
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[2:5], off, s[8:11], 0 offset:144
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v20, s56
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v21, s57
+; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s26
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s27
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:128
+; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s28
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s29
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[12:15], off, s[8:11], 0 offset:112
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v18, s30
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v19, s31
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[16:19], off, s[8:11], 0 offset:96
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s22
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s23
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s20
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s21
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[2:5], off, s[8:11], 0 offset:80
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v10, s18
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v11, s19
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[8:11], off, s[8:11], 0 offset:64
+; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(1)
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s16
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s17
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:48
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v22, s14
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v23, s15
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[20:23], off, s[8:11], 0 offset:32
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s12
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s13
+; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(1)
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[2:5], off, s[8:11], 0 offset:16
 ; GFX6-NOHSA-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v12, s6
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v13, s7
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s5
-; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
 ; GFX6-NOHSA-NEXT:    s_endpgm
 ;
 ; GFX7-HSA-LABEL: constant_sextload_v32i8_to_v32i64:
 ; GFX7-HSA:       ; %bb.0:
 ; GFX7-HSA-NEXT:    s_load_dwordx4 s[8:11], s[8:9], 0x0
 ; GFX7-HSA-NEXT:    s_add_i32 s12, s12, s17
+; GFX7-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
 ; GFX7-HSA-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s27, 0
-; GFX7-HSA-NEXT:    s_mov_b32 s23, s27
 ; GFX7-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7-HSA-NEXT:    s_load_dwordx8 s[0:7], s[10:11], 0x0
-; GFX7-HSA-NEXT:    s_mov_b32 s25, s27
-; GFX7-HSA-NEXT:    s_mov_b32 s21, s27
-; GFX7-HSA-NEXT:    s_mov_b32 s15, s27
-; GFX7-HSA-NEXT:    s_mov_b32 s17, s27
+; GFX7-HSA-NEXT:    s_mov_b32 s11, 0
+; GFX7-HSA-NEXT:    s_mov_b32 s37, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s41, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s55, s11
 ; GFX7-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-HSA-NEXT:    s_lshr_b32 s12, s6, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s10, s6, 24
-; GFX7-HSA-NEXT:    s_ashr_i32 s29, s1, 31
-; GFX7-HSA-NEXT:    s_ashr_i32 s33, s1, 24
-; GFX7-HSA-NEXT:    s_lshr_b32 s40, s0, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s38, s0, 24
-; GFX7-HSA-NEXT:    s_lshr_b32 s30, s0, 8
-; GFX7-HSA-NEXT:    s_lshr_b32 s66, s1, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s68, s1, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s70, s1
-; GFX7-HSA-NEXT:    s_bfe_i64 s[18:19], s[0:1], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[0:1], s[12:13], 0x80000
-; GFX7-HSA-NEXT:    s_lshr_b32 s28, s6, 8
-; GFX7-HSA-NEXT:    s_lshr_b32 s34, s4, 16
-; GFX7-HSA-NEXT:    s_ashr_i32 s35, s3, 31
-; GFX7-HSA-NEXT:    s_lshr_b32 s52, s4, 24
-; GFX7-HSA-NEXT:    s_lshr_b32 s54, s4, 8
-; GFX7-HSA-NEXT:    s_lshr_b32 s56, s2, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s58, s2, 24
-; GFX7-HSA-NEXT:    s_lshr_b32 s44, s2, 8
-; GFX7-HSA-NEXT:    s_lshr_b32 s26, s7, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s22, s7, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s24, s7
-; GFX7-HSA-NEXT:    s_lshr_b32 s20, s5, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s14, s5, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s16, s5
-; GFX7-HSA-NEXT:    s_lshr_b32 s60, s3, 16
-; GFX7-HSA-NEXT:    s_lshr_b32 s62, s3, 8
-; GFX7-HSA-NEXT:    s_mov_b32 s64, s3
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s1
-; GFX7-HSA-NEXT:    s_bfe_i64 s[0:1], s[10:11], 0x80000
-; GFX7-HSA-NEXT:    s_mov_b32 s61, s27
-; GFX7-HSA-NEXT:    s_mov_b32 s63, s27
-; GFX7-HSA-NEXT:    s_mov_b32 s65, s27
-; GFX7-HSA-NEXT:    s_mov_b32 s67, s27
-; GFX7-HSA-NEXT:    s_mov_b32 s69, s27
-; GFX7-HSA-NEXT:    s_mov_b32 s71, s27
-; GFX7-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
-; GFX7-HSA-NEXT:    s_ashr_i32 s46, s3, 24
-; GFX7-HSA-NEXT:    s_ashr_i32 s47, s5, 31
-; GFX7-HSA-NEXT:    s_ashr_i32 s48, s5, 24
-; GFX7-HSA-NEXT:    s_ashr_i32 s49, s7, 31
-; GFX7-HSA-NEXT:    s_ashr_i32 s50, s7, 24
-; GFX7-HSA-NEXT:    s_bfe_i64 s[36:37], s[2:3], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[42:43], s[4:5], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[72:73], s[6:7], 0x80000
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s1
-; GFX7-HSA-NEXT:    s_bfe_i64 s[2:3], s[70:71], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[0:1], s[68:69], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[4:5], s[66:67], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[10:11], s[64:65], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[6:7], s[62:63], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[12:13], s[60:61], 0x80000
+; GFX7-HSA-NEXT:    s_lshr_b32 s10, s7, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s36, s7, 8
+; GFX7-HSA-NEXT:    s_mov_b32 s40, s7
+; GFX7-HSA-NEXT:    s_lshr_b32 s44, s6, 16
+; GFX7-HSA-NEXT:    s_ashr_i32 s45, s1, 24
+; GFX7-HSA-NEXT:    s_ashr_i32 s48, s3, 31
+; GFX7-HSA-NEXT:    s_ashr_i32 s49, s3, 24
+; GFX7-HSA-NEXT:    s_ashr_i32 s63, s5, 31
+; GFX7-HSA-NEXT:    s_ashr_i32 s65, s5, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s50, s6, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s52, s6, 8
+; GFX7-HSA-NEXT:    s_lshr_b32 s54, s5, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s46, s5, 8
+; GFX7-HSA-NEXT:    s_mov_b32 s56, s5
+; GFX7-HSA-NEXT:    s_lshr_b32 s42, s4, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s38, s4, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s34, s4, 8
+; GFX7-HSA-NEXT:    s_lshr_b32 s30, s3, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s24, s3, 8
+; GFX7-HSA-NEXT:    s_mov_b32 s26, s3
+; GFX7-HSA-NEXT:    s_lshr_b32 s22, s2, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s20, s2, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s18, s2, 8
+; GFX7-HSA-NEXT:    s_lshr_b32 s16, s1, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s58, s1, 8
+; GFX7-HSA-NEXT:    s_mov_b32 s12, s1
+; GFX7-HSA-NEXT:    s_lshr_b32 s60, s0, 16
+; GFX7-HSA-NEXT:    s_lshr_b32 s62, s0, 24
+; GFX7-HSA-NEXT:    s_lshr_b32 s64, s0, 8
+; GFX7-HSA-NEXT:    s_bfe_i64 s[14:15], s[2:3], 0x80000
+; GFX7-HSA-NEXT:    s_mov_b32 s47, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s57, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s31, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s25, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s27, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s17, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s59, s11
+; GFX7-HSA-NEXT:    s_mov_b32 s13, s11
+; GFX7-HSA-NEXT:    s_bfe_i64 s[2:3], s[10:11], 0x80000
+; GFX7-HSA-NEXT:    s_ashr_i32 s33, s1, 31
+; GFX7-HSA-NEXT:    s_ashr_i32 s68, s7, 31
+; GFX7-HSA-NEXT:    s_ashr_i32 s69, s7, 24
+; GFX7-HSA-NEXT:    s_bfe_i64 s[0:1], s[0:1], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[28:29], s[4:5], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[66:67], s[6:7], 0x80000
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s2
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-HSA-NEXT:    s_bfe_i64 s[2:3], s[64:65], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[4:5], s[62:63], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[6:7], s[60:61], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[10:11], s[58:59], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[58:59], s[58:59], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[56:57], s[56:57], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[46:47], s[46:47], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[54:55], s[54:55], 0x80000
 ; GFX7-HSA-NEXT:    s_bfe_i64 s[52:53], s[52:53], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[60:61], s[34:35], 0x80000
-; GFX7-HSA-NEXT:    s_bfe_i64 s[62:63], s[28:29], 0x80000
-; GFX7-HSA-NEXT:    s_add_u32 s64, s8, 0xd0
-; GFX7-HSA-NEXT:    s_addc_u32 s65, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v6, s62
-; GFX7-HSA-NEXT:    s_add_u32 s62, s8, 0xc0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v7, s63
-; GFX7-HSA-NEXT:    s_addc_u32 s63, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v10, s52
-; GFX7-HSA-NEXT:    s_add_u32 s52, s8, 0x90
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v11, s53
-; GFX7-HSA-NEXT:    s_addc_u32 s53, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v12, s42
-; GFX7-HSA-NEXT:    s_add_u32 s42, s8, 0x80
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v13, s43
-; GFX7-HSA-NEXT:    s_addc_u32 s43, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v20, s36
-; GFX7-HSA-NEXT:    s_add_u32 s36, s8, 0x50
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v21, s37
+; GFX7-HSA-NEXT:    s_bfe_i64 s[50:51], s[50:51], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[58:59], s[44:45], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x80000
+; GFX7-HSA-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GFX7-HSA-NEXT:    s_add_u32 s60, s8, 0xf0
+; GFX7-HSA-NEXT:    s_addc_u32 s61, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v6, s36
+; GFX7-HSA-NEXT:    s_add_u32 s36, s8, 0xe0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v7, s37
+; GFX7-HSA-NEXT:    s_addc_u32 s37, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v24, s36
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v25, s37
+; GFX7-HSA-NEXT:    s_add_u32 s36, s8, 0xd0
+; GFX7-HSA-NEXT:    s_addc_u32 s37, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v26, s36
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v27, s37
+; GFX7-HSA-NEXT:    s_add_u32 s36, s8, 0xc0
+; GFX7-HSA-NEXT:    s_addc_u32 s37, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v28, s36
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v29, s37
+; GFX7-HSA-NEXT:    s_add_u32 s36, s8, 0xb0
 ; GFX7-HSA-NEXT:    s_addc_u32 s37, s9, 0
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v30, s36
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v24, s62
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v31, s37
-; GFX7-HSA-NEXT:    s_add_u32 s36, s8, 64
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s72
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s73
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v25, s63
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v26, s52
+; GFX7-HSA-NEXT:    s_add_u32 s36, s8, 0xa0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s40
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s41
 ; GFX7-HSA-NEXT:    s_addc_u32 s37, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v8, s60
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v9, s61
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v27, s53
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v16, s56
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v17, s57
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v18, s58
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v19, s59
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[4:7]
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v8, s58
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s28
+; GFX7-HSA-NEXT:    s_add_u32 s28, s8, 0x90
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s29
+; GFX7-HSA-NEXT:    s_addc_u32 s29, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v24, s28
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v9, s59
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v10, s50
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v11, s51
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v25, s29
+; GFX7-HSA-NEXT:    s_add_u32 s28, s8, 0x80
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[8:11]
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s18
-; GFX7-HSA-NEXT:    s_add_u32 s18, s8, 16
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s19
-; GFX7-HSA-NEXT:    s_addc_u32 s19, s9, 0
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[30:31], v[16:19]
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v22, s64
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v16, s18
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v17, s19
-; GFX7-HSA-NEXT:    s_add_u32 s18, s8, 0xf0
-; GFX7-HSA-NEXT:    s_addc_u32 s19, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v18, s18
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v23, s65
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v24, s36
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v19, s19
-; GFX7-HSA-NEXT:    s_add_u32 s18, s8, 0xe0
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[0:3]
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v22, s44
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v23, s45
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s40
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s41
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v25, s37
+; GFX7-HSA-NEXT:    s_addc_u32 s29, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v26, s28
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v18, s60
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v27, s29
+; GFX7-HSA-NEXT:    s_add_u32 s28, s8, 0x70
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v19, s61
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v12, s66
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v13, s67
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v14, s52
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v15, s53
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s69
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s68
+; GFX7-HSA-NEXT:    s_addc_u32 s29, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v16, s54
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v17, s55
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[0:3]
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v18, s65
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v19, s63
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v10, s36
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[28:29], v[12:15]
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v20, s56
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v14, s24
+; GFX7-HSA-NEXT:    s_add_u32 s24, s8, 0x60
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v21, s57
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v22, s46
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v23, s47
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s42
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s43
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v11, s37
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s38
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s39
-; GFX7-HSA-NEXT:    s_addc_u32 s19, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v28, s42
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[20:23]
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[0:3]
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v21, s19
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s8
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v14, s54
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v15, s55
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v29, s43
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v6, s30
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v7, s31
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v20, s18
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s9
-; GFX7-HSA-NEXT:    s_add_u32 s18, s8, 0xb0
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[28:29], v[12:15]
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v8, s26
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v9, s27
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v10, s50
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v11, s49
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v12, s24
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v13, s25
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v14, s22
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v15, s23
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[4:7]
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[8:11]
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[12:15]
-; GFX7-HSA-NEXT:    s_addc_u32 s19, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s18
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s20
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s21
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s48
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s47
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s19
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[30:31], v[16:19]
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v15, s25
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v16, s28
+; GFX7-HSA-NEXT:    s_addc_u32 s25, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v18, s24
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v6, s34
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v7, s35
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v8, s30
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v9, s31
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[10:11], v[20:23]
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v12, s26
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v10, s49
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v11, s48
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v13, s27
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v17, s29
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v19, s25
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[0:3]
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[4:7]
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[8:11]
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[12:15]
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s20
+; GFX7-HSA-NEXT:    s_add_u32 s20, s8, 0x50
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s21
+; GFX7-HSA-NEXT:    s_addc_u32 s21, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s20
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s22
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s23
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s21
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    s_nop 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s14
-; GFX7-HSA-NEXT:    s_add_u32 s14, s8, 0xa0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s15
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s14
+; GFX7-HSA-NEXT:    s_add_u32 s14, s8, 64
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s15
+; GFX7-HSA-NEXT:    s_addc_u32 s15, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s14
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s18
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s19
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s15
+; GFX7-HSA-NEXT:    s_add_u32 s14, s8, 48
+; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    s_addc_u32 s15, s9, 0
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s14
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s16
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s17
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s45
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s33
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s15
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    s_nop 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s10
+; GFX7-HSA-NEXT:    s_add_u32 s10, s8, 32
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s11
+; GFX7-HSA-NEXT:    s_addc_u32 s11, s9, 0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s10
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s12
-; GFX7-HSA-NEXT:    s_add_u32 s12, s8, 0x70
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s13
-; GFX7-HSA-NEXT:    s_addc_u32 s13, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s12
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s46
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s35
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s13
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s11
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    s_nop 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s6
-; GFX7-HSA-NEXT:    s_add_u32 s6, s8, 0x60
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s7
-; GFX7-HSA-NEXT:    s_addc_u32 s7, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s10
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s11
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT:    s_nop 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s4
-; GFX7-HSA-NEXT:    s_add_u32 s4, s8, 48
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s5
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s4
+; GFX7-HSA-NEXT:    s_add_u32 s4, s8, 16
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s5
 ; GFX7-HSA-NEXT:    s_addc_u32 s5, s9, 0
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s4
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s33
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s29
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s6
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s5
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT:    s_nop 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s0
-; GFX7-HSA-NEXT:    s_add_u32 s0, s8, 32
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s1
-; GFX7-HSA-NEXT:    s_addc_u32 s1, s9, 0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s8
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s3
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s9
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    s_endpgm
 ;
 ; GFX8-NOHSA-LABEL: constant_sextload_v32i8_to_v32i64:
 ; GFX8-NOHSA:       ; %bb.0:
 ; GFX8-NOHSA-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX8-NOHSA-NEXT:    s_mov_b32 s63, 0
-; GFX8-NOHSA-NEXT:    s_mov_b32 s23, s63
-; GFX8-NOHSA-NEXT:    s_mov_b32 s65, s63
-; GFX8-NOHSA-NEXT:    s_mov_b32 s19, s63
+; GFX8-NOHSA-NEXT:    s_mov_b32 s47, 0
+; GFX8-NOHSA-NEXT:    s_mov_b32 s53, s47
+; GFX8-NOHSA-NEXT:    s_mov_b32 s63, s47
+; GFX8-NOHSA-NEXT:    s_mov_b32 s49, s47
 ; GFX8-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NOHSA-NEXT:    s_load_dwordx8 s[0:7], s[10:11], 0x0
-; GFX8-NOHSA-NEXT:    s_mov_b32 s67, s63
-; GFX8-NOHSA-NEXT:    s_mov_b32 s15, s63
-; GFX8-NOHSA-NEXT:    s_mov_b32 s45, s63
-; GFX8-NOHSA-NEXT:    s_mov_b32 s11, s63
+; GFX8-NOHSA-NEXT:    s_mov_b32 s65, s47
+; GFX8-NOHSA-NEXT:    s_mov_b32 s31, s47
+; GFX8-NOHSA-NEXT:    s_mov_b32 s67, s47
+; GFX8-NOHSA-NEXT:    s_mov_b32 s19, s47
 ; GFX8-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s46, s6, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s48, s6, 24
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s50, s6, 8
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s52, s4, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s54, s4, 24
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s56, s4, 8
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s58, s2, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s60, s2, 24
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s40, s2, 8
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s36, s0, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s34, s0, 24
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s28, s0, 8
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s62, s7, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s24, s7, 8
-; GFX8-NOHSA-NEXT:    s_mov_b32 s22, s7
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s64, s5, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s20, s5, 8
-; GFX8-NOHSA-NEXT:    s_mov_b32 s18, s5
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s66, s3, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s16, s3, 8
-; GFX8-NOHSA-NEXT:    s_mov_b32 s14, s3
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s44, s1, 16
-; GFX8-NOHSA-NEXT:    s_lshr_b32 s12, s1, 8
-; GFX8-NOHSA-NEXT:    s_mov_b32 s10, s1
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[26:27], s[0:1], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[30:31], s[2:3], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[38:39], s[4:5], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[68:69], s[6:7], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s46, s7, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s54, s7, 8
+; GFX8-NOHSA-NEXT:    s_mov_b32 s52, s7
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s56, s6, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s58, s6, 24
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s60, s6, 8
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s62, s5, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s50, s5, 8
+; GFX8-NOHSA-NEXT:    s_mov_b32 s48, s5
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s42, s4, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s40, s4, 24
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s38, s4, 8
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s64, s3, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s34, s3, 8
+; GFX8-NOHSA-NEXT:    s_mov_b32 s30, s3
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s28, s2, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s26, s2, 24
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s24, s2, 8
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s66, s1, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s20, s1, 8
+; GFX8-NOHSA-NEXT:    s_mov_b32 s18, s1
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s16, s0, 16
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s14, s0, 24
+; GFX8-NOHSA-NEXT:    s_lshr_b32 s12, s0, 8
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[10:11], s[0:1], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[22:23], s[2:3], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[36:37], s[4:5], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[44:45], s[6:7], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s33, s1, 31
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s42, s1, 24
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[0:1], s[44:45], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s43, s3, 31
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s44, s3, 24
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[2:3], s[66:67], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s45, s5, 31
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s66, s5, 24
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[4:5], s[64:65], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s4, s1, 31
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s6, s1, 24
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[0:1], s[66:67], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s64, s7, 31
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s65, s7, 24
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[6:7], s[62:63], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s33, s3, 31
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s66, s3, 24
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[2:3], s[64:65], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[50:51], s[50:51], 0x80000
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s64, s5, 31
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s5, s5, 24
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[62:63], s[62:63], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[60:61], s[60:61], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[58:59], s[58:59], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[56:57], s[56:57], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[54:55], s[54:55], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[52:53], s[52:53], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[50:51], s[50:51], 0x80000
-; GFX8-NOHSA-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GFX8-NOHSA-NEXT:    s_bfe_i64 s[54:55], s[54:55], 0x80000
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s65, s7, 31
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s7, s7, 24
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[46:47], s[46:47], 0x80000
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s46
-; GFX8-NOHSA-NEXT:    s_add_u32 s46, s8, 0xd0
+; GFX8-NOHSA-NEXT:    s_add_u32 s46, s8, 0xf0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s47
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s47, s9, 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s46
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s48
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s49
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s47
-; GFX8-NOHSA-NEXT:    s_add_u32 s46, s8, 0xc0
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_addc_u32 s47, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s46
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s68
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s69
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s50
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s51
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s7
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s65
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s47
-; GFX8-NOHSA-NEXT:    s_add_u32 s46, s8, 0x90
+; GFX8-NOHSA-NEXT:    s_add_u32 s46, s8, 0xe0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s47, s9, 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s46
@@ -8733,89 +8721,63 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s54
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s55
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s47
+; GFX8-NOHSA-NEXT:    s_add_u32 s46, s8, 0xd0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_nop 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s38
-; GFX8-NOHSA-NEXT:    s_add_u32 s38, s8, 0x80
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s39
-; GFX8-NOHSA-NEXT:    s_addc_u32 s39, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s38
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s56
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s57
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s39
-; GFX8-NOHSA-NEXT:    s_add_u32 s38, s8, 0x50
+; GFX8-NOHSA-NEXT:    s_addc_u32 s47, s9, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s46
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s56
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s57
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s58
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s59
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s47
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_addc_u32 s39, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s38
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s58
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s59
+; GFX8-NOHSA-NEXT:    s_nop 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s44
+; GFX8-NOHSA-NEXT:    s_add_u32 s44, s8, 0xc0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s45
+; GFX8-NOHSA-NEXT:    s_addc_u32 s45, s9, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s44
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s60
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s61
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s39
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s45
+; GFX8-NOHSA-NEXT:    s_add_u32 s44, s8, 0xb0
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    s_addc_u32 s45, s9, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s44
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s62
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s63
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s5
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s64
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s45
+; GFX8-NOHSA-NEXT:    s_add_u32 s44, s8, 0xa0
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    s_addc_u32 s45, s9, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s44
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s48
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s49
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s50
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s51
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s45
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    s_nop 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s30
-; GFX8-NOHSA-NEXT:    s_add_u32 s30, s8, 64
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s31
-; GFX8-NOHSA-NEXT:    s_addc_u32 s31, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s30
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s40
+; GFX8-NOHSA-NEXT:    s_add_u32 s40, s8, 0x90
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s41
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s31
-; GFX8-NOHSA-NEXT:    s_add_u32 s30, s8, 16
+; GFX8-NOHSA-NEXT:    s_addc_u32 s41, s9, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s40
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s42
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s43
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s41
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_addc_u32 s31, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s30
+; GFX8-NOHSA-NEXT:    s_nop 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s36
+; GFX8-NOHSA-NEXT:    s_add_u32 s36, s8, 0x80
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s37
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s34
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s35
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s31
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s8
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s26
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s27
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s28
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s29
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s9
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_nop 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8-NOHSA-NEXT:    s_add_u32 s6, s8, 0xf0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s7
-; GFX8-NOHSA-NEXT:    s_addc_u32 s7, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s65
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s64
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
-; GFX8-NOHSA-NEXT:    s_add_u32 s6, s8, 0xe0
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_addc_u32 s7, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s22
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s23
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s24
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s25
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_nop 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NOHSA-NEXT:    s_add_u32 s4, s8, 0xb0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s5
-; GFX8-NOHSA-NEXT:    s_addc_u32 s5, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s4
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s66
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s45
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s5
-; GFX8-NOHSA-NEXT:    s_add_u32 s4, s8, 0xa0
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT:    s_addc_u32 s5, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s4
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s18
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s19
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s20
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s21
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s5
+; GFX8-NOHSA-NEXT:    s_addc_u32 s37, s9, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s36
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s38
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s39
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s37
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    s_nop 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s2
@@ -8825,15 +8787,33 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GFX8-NOHSA-NEXT:    s_add_u32 s2, s8, 0x60
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s44
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s43
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s66
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s33
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s3, s9, 0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s14
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s15
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s16
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s17
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
+; GFX8-NOHSA-NEXT:    s_add_u32 s2, s8, 0x50
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s30
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s31
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s34
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s35
+; GFX8-NOHSA-NEXT:    s_addc_u32 s3, s9, 0
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
+; GFX8-NOHSA-NEXT:    s_add_u32 s2, s8, 64
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s28
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s29
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s26
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s27
+; GFX8-NOHSA-NEXT:    s_addc_u32 s3, s9, 0
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s3
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s22
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s23
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s24
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s25
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    s_nop 0
@@ -8844,16 +8824,32 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX8-NOHSA-NEXT:    s_add_u32 s0, s8, 32
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s42
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s33
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s4
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s1, s9, 0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s1
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s0
+; GFX8-NOHSA-NEXT:    s_add_u32 s0, s8, 16
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s18
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s19
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s20
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s21
+; GFX8-NOHSA-NEXT:    s_addc_u32 s1, s9, 0
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s1
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s16
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s17
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s14
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s15
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s0
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s8
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s11
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s12
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s13
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s9
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    s_endpgm
 ;
@@ -9049,133 +9045,133 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX12-LABEL: constant_sextload_v32i8_to_v32i64:
 ; GFX12:       ; %bb.0:
 ; GFX12-NEXT:    s_load_b128 s[8:11], s[4:5], 0x24
-; GFX12-NEXT:    s_mov_b32 s29, 0
+; GFX12-NEXT:    s_mov_b32 s41, 0
 ; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-NEXT:    s_mov_b32 s27, s29
-; GFX12-NEXT:    s_mov_b32 s21, s29
-; GFX12-NEXT:    s_mov_b32 s25, s29
-; GFX12-NEXT:    s_mov_b32 s19, s29
-; GFX12-NEXT:    s_mov_b32 s15, s29
-; GFX12-NEXT:    s_mov_b32 s13, s29
+; GFX12-NEXT:    s_mov_b32 s47, s41
+; GFX12-NEXT:    s_mov_b32 s25, s41
+; GFX12-NEXT:    s_mov_b32 s43, s41
+; GFX12-NEXT:    s_mov_b32 s31, s41
+; GFX12-NEXT:    s_mov_b32 s45, s41
+; GFX12-NEXT:    s_mov_b32 s37, s41
+; GFX12-NEXT:    s_mov_b32 s19, s41
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_load_b256 s[0:7], s[10:11], 0x0
-; GFX12-NEXT:    s_mov_b32 s11, s29
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    s_lshr_b32 s30, s6, 16
-; GFX12-NEXT:    s_lshr_b32 s34, s6, 24
-; GFX12-NEXT:    s_lshr_b32 s36, s6, 8
-; GFX12-NEXT:    s_lshr_b32 s38, s4, 16
-; GFX12-NEXT:    s_lshr_b32 s40, s4, 24
-; GFX12-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GFX12-NEXT:    s_lshr_b32 s42, s4, 8
+; GFX12-NEXT:    s_lshr_b32 s40, s7, 16
+; GFX12-NEXT:    s_lshr_b32 s54, s6, 8
+; GFX12-NEXT:    s_lshr_b32 s62, s4, 8
+; GFX12-NEXT:    s_lshr_b32 s46, s3, 16
+; GFX12-NEXT:    s_lshr_b32 s64, s2, 8
+; GFX12-NEXT:    s_lshr_b32 s24, s1, 16
+; GFX12-NEXT:    s_ashr_i32 s55, s3, 31
+; GFX12-NEXT:    s_ashr_i32 s63, s3, 24
+; GFX12-NEXT:    s_lshr_b32 s48, s7, 8
+; GFX12-NEXT:    s_mov_b32 s42, s7
+; GFX12-NEXT:    s_lshr_b32 s50, s6, 16
+; GFX12-NEXT:    s_lshr_b32 s52, s6, 24
+; GFX12-NEXT:    s_lshr_b32 s30, s5, 16
+; GFX12-NEXT:    s_lshr_b32 s56, s5, 8
+; GFX12-NEXT:    s_mov_b32 s44, s5
+; GFX12-NEXT:    s_lshr_b32 s58, s4, 16
+; GFX12-NEXT:    s_lshr_b32 s60, s4, 24
+; GFX12-NEXT:    s_lshr_b32 s38, s3, 8
+; GFX12-NEXT:    s_mov_b32 s36, s3
+; GFX12-NEXT:    s_lshr_b32 s28, s2, 16
+; GFX12-NEXT:    s_lshr_b32 s26, s2, 24
+; GFX12-NEXT:    s_lshr_b32 s20, s1, 8
+; GFX12-NEXT:    s_mov_b32 s18, s1
+; GFX12-NEXT:    s_lshr_b32 s16, s0, 16
+; GFX12-NEXT:    s_lshr_b32 s14, s0, 24
+; GFX12-NEXT:    s_lshr_b32 s12, s0, 8
+; GFX12-NEXT:    s_bfe_i64 s[10:11], s[0:1], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[22:23], s[2:3], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[34:35], s[4:5], 0x80000
 ; GFX12-NEXT:    s_bfe_i64 s[66:67], s[6:7], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s31
+; GFX12-NEXT:    s_ashr_i32 s6, s1, 31
+; GFX12-NEXT:    s_ashr_i32 s33, s1, 24
+; GFX12-NEXT:    s_bfe_i64 s[0:1], s[24:25], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[24:25], s[64:65], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[2:3], s[46:47], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[46:47], s[62:63], 0x80000
+; GFX12-NEXT:    s_ashr_i32 s62, s5, 31
+; GFX12-NEXT:    s_ashr_i32 s64, s5, 24
+; GFX12-NEXT:    s_bfe_i64 s[4:5], s[54:55], 0x80000
+; GFX12-NEXT:    s_ashr_i32 s54, s7, 31
 ; GFX12-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v0, s30 :: v_dual_mov_b32 v3, s35
-; GFX12-NEXT:    v_dual_mov_b32 v2, s34 :: v_dual_mov_b32 v5, s67
-; GFX12-NEXT:    s_lshr_b32 s44, s2, 16
-; GFX12-NEXT:    s_lshr_b32 s46, s2, 24
-; GFX12-NEXT:    s_bfe_i64 s[64:65], s[4:5], 0x80000
-; GFX12-NEXT:    s_ashr_i32 s45, s1, 24
+; GFX12-NEXT:    s_ashr_i32 s7, s7, 24
 ; GFX12-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v4, s66 :: v_dual_mov_b32 v7, s37
-; GFX12-NEXT:    v_dual_mov_b32 v6, s36 :: v_dual_mov_b32 v9, s39
-; GFX12-NEXT:    s_lshr_b32 s48, s2, 8
-; GFX12-NEXT:    v_dual_mov_b32 v8, s38 :: v_dual_mov_b32 v11, s41
-; GFX12-NEXT:    v_dual_mov_b32 v10, s40 :: v_dual_mov_b32 v13, s65
-; GFX12-NEXT:    s_lshr_b32 s50, s0, 16
-; GFX12-NEXT:    s_lshr_b32 s52, s0, 24
-; GFX12-NEXT:    s_bfe_i64 s[46:47], s[46:47], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v12, s64 :: v_dual_mov_b32 v15, s43
-; GFX12-NEXT:    s_bfe_i64 s[30:31], s[44:45], 0x80000
-; GFX12-NEXT:    v_mov_b32_e32 v14, s42
-; GFX12-NEXT:    s_lshr_b32 s54, s0, 8
-; GFX12-NEXT:    s_lshr_b32 s28, s7, 16
-; GFX12-NEXT:    s_lshr_b32 s56, s7, 8
-; GFX12-NEXT:    s_lshr_b32 s26, s5, 16
-; GFX12-NEXT:    s_lshr_b32 s58, s5, 8
-; GFX12-NEXT:    s_lshr_b32 s20, s3, 16
-; GFX12-NEXT:    s_lshr_b32 s60, s1, 8
-; GFX12-NEXT:    s_bfe_i64 s[62:63], s[2:3], 0x80000
-; GFX12-NEXT:    s_ashr_i32 s55, s3, 31
-; GFX12-NEXT:    s_ashr_i32 s57, s3, 24
 ; GFX12-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GFX12-NEXT:    v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s41
 ; GFX12-NEXT:    s_bfe_i64 s[52:53], s[52:53], 0x80000
 ; GFX12-NEXT:    s_bfe_i64 s[50:51], s[50:51], 0x80000
+; GFX12-NEXT:    v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v3, s54
+; GFX12-NEXT:    v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v5, s43
+; GFX12-NEXT:    v_dual_mov_b32 v4, s42 :: v_dual_mov_b32 v7, s49
+; GFX12-NEXT:    v_dual_mov_b32 v6, s48 :: v_dual_mov_b32 v9, s51
+; GFX12-NEXT:    v_dual_mov_b32 v8, s50 :: v_dual_mov_b32 v11, s53
+; GFX12-NEXT:    v_dual_mov_b32 v10, s52 :: v_dual_mov_b32 v13, s67
+; GFX12-NEXT:    v_dual_mov_b32 v12, s66 :: v_dual_mov_b32 v15, s5
+; GFX12-NEXT:    v_mov_b32_e32 v14, s4
+; GFX12-NEXT:    s_bfe_i64 s[4:5], s[30:31], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[56:57], s[56:57], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[60:61], s[60:61], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[58:59], s[58:59], 0x80000
 ; GFX12-NEXT:    s_clause 0x3
-; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[8:9] offset:208
-; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[8:9] offset:192
+; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[8:9] offset:240
+; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[8:9] offset:224
+; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[8:9] offset:208
+; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[8:9] offset:192
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s62
+; GFX12-NEXT:    v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s64
+; GFX12-NEXT:    v_dual_mov_b32 v5, s45 :: v_dual_mov_b32 v4, s44
+; GFX12-NEXT:    v_dual_mov_b32 v7, s57 :: v_dual_mov_b32 v6, s56
+; GFX12-NEXT:    v_dual_mov_b32 v9, s59 :: v_dual_mov_b32 v8, s58
+; GFX12-NEXT:    v_dual_mov_b32 v11, s61 :: v_dual_mov_b32 v10, s60
+; GFX12-NEXT:    v_mov_b32_e32 v13, s35
+; GFX12-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GFX12-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x80000
+; GFX12-NEXT:    v_dual_mov_b32 v12, s34 :: v_dual_mov_b32 v15, s47
+; GFX12-NEXT:    v_dual_mov_b32 v14, s46 :: v_dual_mov_b32 v17, s3
+; GFX12-NEXT:    v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v19, s55
+; GFX12-NEXT:    v_dual_mov_b32 v18, s63 :: v_dual_mov_b32 v21, s37
+; GFX12-NEXT:    v_dual_mov_b32 v20, s36 :: v_dual_mov_b32 v23, s39
+; GFX12-NEXT:    v_mov_b32_e32 v22, s38
+; GFX12-NEXT:    s_clause 0x5
+; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[8:9] offset:176
+; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[8:9] offset:160
 ; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[8:9] offset:144
 ; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[8:9] offset:128
-; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    v_dual_mov_b32 v0, s30 :: v_dual_mov_b32 v3, s47
-; GFX12-NEXT:    v_dual_mov_b32 v1, s31 :: v_dual_mov_b32 v2, s46
-; GFX12-NEXT:    v_mov_b32_e32 v5, s63
-; GFX12-NEXT:    s_mov_b32 s24, s7
-; GFX12-NEXT:    s_mov_b32 s18, s5
-; GFX12-NEXT:    s_lshr_b32 s16, s3, 8
-; GFX12-NEXT:    s_mov_b32 s14, s3
-; GFX12-NEXT:    s_lshr_b32 s12, s1, 16
-; GFX12-NEXT:    s_mov_b32 s10, s1
-; GFX12-NEXT:    s_bfe_i64 s[22:23], s[0:1], 0x80000
-; GFX12-NEXT:    s_ashr_i32 s33, s1, 31
-; GFX12-NEXT:    s_bfe_i64 s[0:1], s[60:61], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[2:3], s[20:21], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[20:21], s[58:59], 0x80000
-; GFX12-NEXT:    s_ashr_i32 s58, s5, 31
-; GFX12-NEXT:    s_ashr_i32 s59, s5, 24
-; GFX12-NEXT:    s_bfe_i64 s[4:5], s[26:27], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[26:27], s[56:57], 0x80000
-; GFX12-NEXT:    s_ashr_i32 s56, s7, 31
-; GFX12-NEXT:    s_ashr_i32 s60, s7, 24
-; GFX12-NEXT:    s_bfe_i64 s[6:7], s[28:29], 0x80000
-; GFX12-NEXT:    s_bfe_i64 s[28:29], s[54:55], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v4, s62 :: v_dual_mov_b32 v7, s49
-; GFX12-NEXT:    v_dual_mov_b32 v6, s48 :: v_dual_mov_b32 v9, s51
-; GFX12-NEXT:    v_dual_mov_b32 v8, s50 :: v_dual_mov_b32 v11, s53
-; GFX12-NEXT:    v_dual_mov_b32 v10, s52 :: v_dual_mov_b32 v13, s23
-; GFX12-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v15, s29
-; GFX12-NEXT:    v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v17, s7
+; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[8:9] offset:112
+; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[8:9] offset:96
+; GFX12-NEXT:    v_dual_mov_b32 v0, s28 :: v_dual_mov_b32 v3, s27
+; GFX12-NEXT:    v_dual_mov_b32 v1, s29 :: v_dual_mov_b32 v2, s26
+; GFX12-NEXT:    v_mov_b32_e32 v5, s23
 ; GFX12-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v16, s6 :: v_dual_mov_b32 v19, s56
-; GFX12-NEXT:    v_dual_mov_b32 v18, s60 :: v_dual_mov_b32 v21, s25
-; GFX12-NEXT:    v_dual_mov_b32 v20, s24 :: v_dual_mov_b32 v23, s27
-; GFX12-NEXT:    v_mov_b32_e32 v22, s26
-; GFX12-NEXT:    s_clause 0x5
-; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[8:9] offset:80
-; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[8:9] offset:64
-; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[8:9] offset:16
-; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[8:9]
-; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[8:9] offset:240
-; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[8:9] offset:224
-; GFX12-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s58
-; GFX12-NEXT:    v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s59
-; GFX12-NEXT:    v_mov_b32_e32 v5, s19
+; GFX12-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GFX12-NEXT:    v_dual_mov_b32 v4, s22 :: v_dual_mov_b32 v7, s25
+; GFX12-NEXT:    v_dual_mov_b32 v6, s24 :: v_dual_mov_b32 v9, s1
 ; GFX12-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GFX12-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v7, s21
-; GFX12-NEXT:    v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v9, s3
+; GFX12-NEXT:    v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v11, s6
+; GFX12-NEXT:    v_dual_mov_b32 v10, s33 :: v_dual_mov_b32 v13, s19
 ; GFX12-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v11, s55
-; GFX12-NEXT:    v_dual_mov_b32 v10, s57 :: v_dual_mov_b32 v13, s15
-; GFX12-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GFX12-NEXT:    v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v15, s17
-; GFX12-NEXT:    v_dual_mov_b32 v14, s16 :: v_dual_mov_b32 v17, s13
-; GFX12-NEXT:    v_dual_mov_b32 v16, s12 :: v_dual_mov_b32 v19, s33
-; GFX12-NEXT:    v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v21, s11
-; GFX12-NEXT:    v_dual_mov_b32 v20, s10 :: v_dual_mov_b32 v23, s1
-; GFX12-NEXT:    v_mov_b32_e32 v22, s0
+; GFX12-NEXT:    v_dual_mov_b32 v12, s18 :: v_dual_mov_b32 v15, s21
+; GFX12-NEXT:    v_dual_mov_b32 v14, s20 :: v_dual_mov_b32 v17, s17
+; GFX12-NEXT:    v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v19, s15
+; GFX12-NEXT:    v_dual_mov_b32 v18, s14 :: v_dual_mov_b32 v21, s11
+; GFX12-NEXT:    v_dual_mov_b32 v20, s10 :: v_dual_mov_b32 v23, s13
+; GFX12-NEXT:    v_mov_b32_e32 v22, s12
 ; GFX12-NEXT:    s_clause 0x5
-; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[8:9] offset:176
-; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[8:9] offset:160
-; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[8:9] offset:112
-; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[8:9] offset:96
-; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[8:9] offset:48
-; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[8:9] offset:32
+; GFX12-NEXT:    global_store_b128 v24, v[0:3], s[8:9] offset:80
+; GFX12-NEXT:    global_store_b128 v24, v[4:7], s[8:9] offset:64
+; GFX12-NEXT:    global_store_b128 v24, v[8:11], s[8:9] offset:48
+; GFX12-NEXT:    global_store_b128 v24, v[12:15], s[8:9] offset:32
+; GFX12-NEXT:    global_store_b128 v24, v[16:19], s[8:9] offset:16
+; GFX12-NEXT:    global_store_b128 v24, v[20:23], s[8:9]
 ; GFX12-NEXT:    s_endpgm
   %load = load <32 x i8>, ptr addrspace(4) %in
   %ext = sext <32 x i8> %load to <32 x i64>
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index 388006281abdc..ab09bd7288e9f 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -6356,31 +6356,51 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out,
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s6
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s7, 0
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v6, 16, v2
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v7, 16, v0
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v0, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v4, v2, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v11, 31, v1
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v10, 16, v1
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v8, v1, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v15, 31, v3
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v14, 16, v3
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v12, v3, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v2, v7, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v6, v6, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s11, v3
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s10, v2
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s6, s11
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s5
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s10, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[4:5], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s13, s5, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s15, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[8:9], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s20, s11, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s21, s11, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[14:15], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[12:13], 0x100000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s21
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s20
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s15
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s13
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s18
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s19
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s17
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s10
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s11
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s9
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
 ; GCN-HSA-LABEL: global_sextload_v8i16_to_v8i64:
@@ -6393,43 +6413,61 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out,
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; GCN-HSA-NEXT:    s_mov_b32 s3, 0
+; GCN-HSA-NEXT:    s_mov_b32 s5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s0
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s6, v0
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s7, v1
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s8, v2
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-HSA-NEXT:    s_mov_b32 s2, s9
+; GCN-HSA-NEXT:    s_mov_b32 s4, s7
+; GCN-HSA-NEXT:    s_lshr_b32 s10, s8, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s12, s6, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s11, s7, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s13, s7, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[6:7], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[8:9], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s18, s9, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s19, s9, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[12:13], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[10:11], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x100000
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s19
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s18
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s1
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[0:3]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s0
-; GCN-HSA-NEXT:    s_add_u32 s0, s0, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s13
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s11
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT:    s_add_u32 s0, s0, 32
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[4:7]
 ; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s1
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s0
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v5, 16, v0
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v3, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v0, v0, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v4, v1, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v3
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 16, v3
-; GCN-HSA-NEXT:    v_bfe_i32 v12, v2, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v2, v5, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v14, v9, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v6, 16, v1
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[8:11]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[4:7]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[12:15]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s17
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s8
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s9
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s15
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s6
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s7
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[8:11]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
 ; GCN-NOHSA-VI-LABEL: global_sextload_v8i16_to_v8i64:
@@ -6970,149 +7008,224 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s6
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s7, 0
 ; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[8:11], 0
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s7
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s11, s7
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s13, s7
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s14, v2
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s15, v3
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v6
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v11, 16, v4
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v4, v4, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v8, v6, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v15, 31, v5
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v14, 16, v5
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v12, v5, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v19, 31, v7
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 16, v7
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v16, v7, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v0, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v6, v11, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v10, v10, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v9, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v3
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 16, v3
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v3, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:112
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v27, 31, v1
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s16, v4
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s17, v5
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s19, v7
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s18, v6
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s6, s15
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s5
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s10, s19
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s12, s17
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s20, s14, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s18, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s26, s16, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[28:29], s[16:17], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[30:31], s[18:19], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[34:35], s[4:5], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[14:15], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s21, s17, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s23, s17, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s25, s19, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s27, s19, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s5, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s38, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[8:9], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s39, s15, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s40, s15, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[26:27], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[24:25], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[22:23], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[20:21], 0x100000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s40
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s39
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 16, v1
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v24, v1, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v2, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v2, v1, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s38
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s27
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s25
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s23
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s21
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s36
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s37
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s34
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s35
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s30
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s28
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s29
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s18
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s19
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s14
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s15
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s9
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
 ; GCN-HSA-LABEL: global_sextload_v16i16_to_v16i64:
 ; GCN-HSA:       ; %bb.0:
 ; GCN-HSA-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
 ; GCN-HSA-NEXT:    s_add_i32 s12, s12, s17
-; GCN-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
 ; GCN-HSA-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
+; GCN-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
+; GCN-HSA-NEXT:    s_mov_b32 s7, 0
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    flat_load_dwordx4 v[4:7], v[0:1]
 ; GCN-HSA-NEXT:    s_add_u32 s2, s2, 16
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
+; GCN-HSA-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; GCN-HSA-NEXT:    s_mov_b32 s9, s7
+; GCN-HSA-NEXT:    s_mov_b32 s3, s7
+; GCN-HSA-NEXT:    s_mov_b32 s5, s7
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s0
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(1)
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s11, v1
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s13, v3
+; GCN-HSA-NEXT:    s_mov_b32 s6, s13
+; GCN-HSA-NEXT:    s_mov_b32 s8, s11
+; GCN-HSA-NEXT:    s_lshr_b32 s2, s12, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s4, s10, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[10:11], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[12:13], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s22, s11, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s23, s11, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s24, s13, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s25, s13, 16
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s10, v4
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s11, v5
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s12, v6
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s13, v7
+; GCN-HSA-NEXT:    s_bfe_i64 s[18:19], s[4:5], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[2:3], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x100000
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s14
+; GCN-HSA-NEXT:    s_mov_b32 s2, s13
+; GCN-HSA-NEXT:    s_mov_b32 s4, s11
+; GCN-HSA-NEXT:    s_lshr_b32 s14, s12, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s16, s10, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s25
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s23
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s22
+; GCN-HSA-NEXT:    s_bfe_i64 s[22:23], s[10:11], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[12:13], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s10, s11, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s11, s11, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s12, s13, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s13, s13, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s8
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s9
+; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[16:17], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[14:15], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x100000
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s10
+; GCN-HSA-NEXT:    s_add_u32 s10, s0, 48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s15
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s18
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s19
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s11
+; GCN-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[12:15]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s13
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s11
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s10
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s3
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s2
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[12:13], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x70
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s2
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[12:13], v[4:7]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s12
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x50
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s1
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[14:17]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s5
+; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[18:21]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s17
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s20
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s21
+; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[8:11]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    s_add_u32 s0, s0, 64
+; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s25
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s8
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s9
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s1
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s0
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(1)
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v7
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 16, v7
-; GCN-HSA-NEXT:    v_bfe_i32 v7, v7, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[14:15], v[7:10]
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v11, 16, v6
-; GCN-HSA-NEXT:    v_bfe_i32 v7, v5, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v5
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 16, v5
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v14, 16, v4
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[7:10]
-; GCN-HSA-NEXT:    v_bfe_i32 v4, v4, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v7, v6, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v9, v11, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GCN-HSA-NEXT:    v_bfe_i32 v6, v14, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[7:10]
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(3)
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v14, 16, v2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v15, 16, v0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[12:13], v[4:7]
-; GCN-HSA-NEXT:    v_bfe_i32 v0, v0, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v4, v3, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v1, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v3
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v6, 16, v3
-; GCN-HSA-NEXT:    v_bfe_i32 v12, v2, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v2, v15, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v14, v14, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v1
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 16, v1
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[4:7]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[8:11]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[12:15]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[0:3]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[22:25]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s22
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s23
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
 ; GCN-NOHSA-VI-LABEL: global_sextload_v16i16_to_v16i64:
@@ -8094,296 +8207,441 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s2, -1
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s10, s2
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s11, s3
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s19, 0
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s30, s2
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s31, s3
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s6
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s7
-; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[12:15], off, s[8:11], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s28, s6
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s29, s7
+; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[28:31], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[28:31], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s27, s19
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s11, s19
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s13, s19
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s15, s19
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s17, s19
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s21, s19
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s23, s19
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
-; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[8:11], off, s[8:11], 0
-; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[8:11], off, s[28:31], 0
+; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[12:15], off, s[28:31], 0 offset:16
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v20, 16, v14
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v21, 16, v12
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s24, v0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s25, v1
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s28, v2
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s29, v3
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(2)
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s36, v4
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s37, v5
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s30, v6
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s31, v7
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v17, 16, v6
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v18, 16, v4
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v19, 16, v10
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v22, 16, v8
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v15
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 16, v15
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v15, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:240
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v13
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 16, v13
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v13, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s38, v12
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s39, v13
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s40, v14
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s41, v15
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s42, v8
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s43, v9
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s45, v11
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s44, v10
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s18, s29
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s26, s25
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s10, s31
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s12, s37
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s14, s41
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s16, s39
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s20, s45
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s22, s43
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s34, s28, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s46, s24, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s48, s30, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s50, s36, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s52, s40, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s54, s38, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s56, s44, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s58, s42, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[42:43], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[6:7], s[44:45], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[38:39], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s43, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s35, s43, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s47, s45, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s49, s45, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s51, s39, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s53, s39, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s55, s41, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s57, s41, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[18:19], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[40:41], 0x100000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s38
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s39
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s59, s37, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s66, s37, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[26:27], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[26:27], s[36:37], 0x100000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s38
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s39
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s36, s29, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s37, s29, 31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s36
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s36, s25, 16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s37
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s37, s25, 31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s36
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s67, s31, 31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s37
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s68, s31, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[24:25], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[28:29], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[40:41], s[22:23], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[42:43], s[20:21], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[44:45], s[16:17], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[60:61], s[14:15], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[62:63], s[12:13], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[64:65], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[58:59], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[56:57], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[54:55], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[52:53], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[20:21], s[50:51], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[22:23], s[48:49], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[46:47], 0x100000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[28:29], s[34:35], 0x100000
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(1)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s64
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s65
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s68
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s67
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v3
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 16, v3
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v3, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:176
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s62
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s63
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s66
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s59
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v1
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 16, v1
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v1, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:144
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s60
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s61
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s57
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s55
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v7
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 16, v7
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v7, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:112
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s44
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s45
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s53
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s51
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v5
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 16, v5
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v5, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s42
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s43
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s49
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s47
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v11
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 16, v11
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v11, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v11, v12, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s40
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s41
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s35
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v14, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v13, v21, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v25, v20, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v3, v22, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v1, v8, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:224
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v8, 31, v9
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v7, 16, v9
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v5, v9, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v9, v10, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:192
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s38
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s39
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s36
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s37
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s30
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s26
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s27
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, s18
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, s19
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v20, s8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v21, s9
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v24, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v25, s7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s28
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s29
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v13, v4, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v6, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v11, v19, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v15, v18, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v17, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v26, v16, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v24, v0, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v2
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v28, v2, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v30, v0, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v29, 31, v28
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v31, 31, v30
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[28:31], off, s[0:3], 0 offset:160
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:128
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:96
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:32
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[1:4], off, s[0:3], 0
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[5:8], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s25
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s22
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s23
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s20
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s21
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v18, s16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v19, s17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, s14
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v23, s15
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v26, s12
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v27, s13
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s10
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s11
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
 ; GCN-HSA-LABEL: global_sextload_v32i16_to_v32i64:
 ; GCN-HSA:       ; %bb.0:
 ; GCN-HSA-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
 ; GCN-HSA-NEXT:    s_add_i32 s12, s12, s17
-; GCN-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
 ; GCN-HSA-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
+; GCN-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
+; GCN-HSA-NEXT:    s_mov_b32 s13, 0
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    flat_load_dwordx4 v[12:15], v[0:1]
 ; GCN-HSA-NEXT:    s_add_u32 s4, s2, 48
+; GCN-HSA-NEXT:    flat_load_dwordx4 v[10:13], v[0:1]
 ; GCN-HSA-NEXT:    s_addc_u32 s5, s3, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-HSA-NEXT:    s_add_u32 s4, s2, 32
+; GCN-HSA-NEXT:    flat_load_dwordx4 v[14:17], v[0:1]
 ; GCN-HSA-NEXT:    s_addc_u32 s5, s3, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-HSA-NEXT:    s_add_u32 s2, s2, 16
+; GCN-HSA-NEXT:    flat_load_dwordx4 v[6:9], v[0:1]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-HSA-NEXT:    flat_load_dwordx4 v[8:11], v[0:1]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT:    flat_load_dwordx4 v[2:5], v[0:1]
+; GCN-HSA-NEXT:    s_mov_b32 s39, s13
+; GCN-HSA-NEXT:    s_mov_b32 s41, s13
+; GCN-HSA-NEXT:    s_mov_b32 s11, s13
+; GCN-HSA-NEXT:    s_mov_b32 s7, s13
+; GCN-HSA-NEXT:    s_mov_b32 s9, s13
+; GCN-HSA-NEXT:    s_mov_b32 s43, s13
+; GCN-HSA-NEXT:    s_mov_b32 s37, s13
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(3)
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s17, v13
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s14, v10
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s15, v11
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s16, v12
+; GCN-HSA-NEXT:    s_mov_b32 s12, s17
+; GCN-HSA-NEXT:    s_mov_b32 s38, s15
+; GCN-HSA-NEXT:    s_lshr_b32 s6, s16, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s8, s14, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[14:15], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[16:17], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s33, s15, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s52, s15, 16
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(2)
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s14, v14
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s15, v15
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s16, v16
+; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s53, s17, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s54, s17, 16
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s17, v17
+; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[8:9], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[30:31], s[6:7], 0x100000
+; GCN-HSA-NEXT:    s_mov_b32 s10, s15
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-HSA-NEXT:    s_lshr_b32 s6, s14, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s55, s15, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s56, s15, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[18:19], s[14:15], 0x100000
+; GCN-HSA-NEXT:    s_lshr_b32 s8, s16, 16
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(1)
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s12, v6
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s13, v7
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s14, v8
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s15, v9
+; GCN-HSA-NEXT:    s_bfe_i64 s[28:29], s[16:17], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[6:7], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[34:35], s[8:9], 0x100000
+; GCN-HSA-NEXT:    s_mov_b32 s6, s15
+; GCN-HSA-NEXT:    s_mov_b32 s8, s13
+; GCN-HSA-NEXT:    s_lshr_b32 s16, s12, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s61, s15, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s62, s15, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s26, s14, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[22:23], s[14:15], 0x100000
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s14, v2
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s15, v3
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s46, v4
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s47, v5
+; GCN-HSA-NEXT:    s_mov_b32 s40, s17
+; GCN-HSA-NEXT:    s_bfe_i64 s[44:45], s[10:11], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s59, s13, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s60, s13, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[12:13], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[16:17], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[48:49], s[8:9], 0x100000
+; GCN-HSA-NEXT:    s_mov_b32 s42, s47
+; GCN-HSA-NEXT:    s_mov_b32 s36, s15
+; GCN-HSA-NEXT:    s_lshr_b32 s8, s14, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s16, s46, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s57, s17, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s58, s17, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[50:51], s[6:7], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s63, s15, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s64, s15, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[14:15], 0x100000
+; GCN-HSA-NEXT:    s_ashr_i32 s65, s47, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s66, s47, 16
+; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[46:47], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x100000
+; GCN-HSA-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x100000
+; GCN-HSA-NEXT:    s_add_u32 s46, s0, 48
+; GCN-HSA-NEXT:    s_addc_u32 s47, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s38
+; GCN-HSA-NEXT:    s_add_u32 s38, s0, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s39
+; GCN-HSA-NEXT:    s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s38
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s39
+; GCN-HSA-NEXT:    s_add_u32 s38, s0, 0xf0
+; GCN-HSA-NEXT:    s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s38
+; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s39
+; GCN-HSA-NEXT:    s_add_u32 s38, s0, 0xd0
+; GCN-HSA-NEXT:    s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v28, s38
+; GCN-HSA-NEXT:    v_mov_b32_e32 v29, s39
+; GCN-HSA-NEXT:    s_add_u32 s38, s0, 0xb0
+; GCN-HSA-NEXT:    s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v30, s38
+; GCN-HSA-NEXT:    v_mov_b32_e32 v31, s39
+; GCN-HSA-NEXT:    s_add_u32 s38, s0, 0x90
+; GCN-HSA-NEXT:    s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v32, s38
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s52
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s33
+; GCN-HSA-NEXT:    v_mov_b32_e32 v33, s39
+; GCN-HSA-NEXT:    s_add_u32 s38, s0, 0x70
+; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s46
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[4:7]
+; GCN-HSA-NEXT:    s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s38
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s47
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s54
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s53
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s39
+; GCN-HSA-NEXT:    s_add_u32 s38, s0, 0x50
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s50
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s51
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s62
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s61
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[0:3]
+; GCN-HSA-NEXT:    s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s42
+; GCN-HSA-NEXT:    v_mov_b32_e32 v34, s38
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s49
+; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s60
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s59
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s43
+; GCN-HSA-NEXT:    v_mov_b32_e32 v35, s39
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s66
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s65
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s36
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s37
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s64
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[30:31], v[16:19]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[32:33], v[20:23]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s63
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-HSA-NEXT:    s_add_u32 s4, s0, 32
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[34:35], v[4:7]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-HSA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s2
+; GCN-HSA-NEXT:    s_addc_u32 s5, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s30
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s31
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xe0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s20
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s21
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s0
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s1
-; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s0
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(3)
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v18, 31, v15
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 16, v15
-; GCN-HSA-NEXT:    v_bfe_i32 v15, v15, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[23:24], v[15:18]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s3
-; GCN-HSA-NEXT:    v_bfe_i32 v15, v13, 0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xf0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v18, 31, v13
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 16, v13
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v13, 16, v14
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[25:26], v[15:18]
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(4)
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v27, 16, v10
-; GCN-HSA-NEXT:    v_bfe_i32 v15, v13, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v13, v14, 0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s3
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s2
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xc0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[21:22], v[13:16]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s28
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s29
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s34
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s35
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v14, 16, v12
-; GCN-HSA-NEXT:    v_bfe_i32 v12, v12, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v14, v14, 0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s3
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xd0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[19:20], v[12:15]
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v14, 31, v11
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 16, v11
-; GCN-HSA-NEXT:    v_bfe_i32 v11, v11, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s3
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v28, 16, v8
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[17:18], v[11:14]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s2
-; GCN-HSA-NEXT:    v_bfe_i32 v11, v9, 0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xa0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v14, 31, v9
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 16, v9
-; GCN-HSA-NEXT:    v_bfe_i32 v15, v10, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-HSA-NEXT:    v_bfe_i32 v10, v28, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v17, v27, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v8, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(5)
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v19, 16, v2
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[25:26], v[11:14]
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT:    v_bfe_i32 v12, v2, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v14, v19, 0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[23:24], v[15:18]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[21:22], v[8:11]
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s3
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xb0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[10:11], v[12:15]
-; GCN-HSA-NEXT:    v_bfe_i32 v10, v3, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v12, 16, v3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s18
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s19
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s25
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v27, 16, v0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x80
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v23, 31, v1
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v22, 16, v1
-; GCN-HSA-NEXT:    v_bfe_i32 v20, v1, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v24, v0, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v26, v27, 0, 16
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x90
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[24:27]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s22
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s23
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s26
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s27
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v6
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v19, 16, v4
-; GCN-HSA-NEXT:    v_bfe_i32 v16, v6, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GCN-HSA-NEXT:    v_bfe_i32 v18, v18, 0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[20:23]
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[2:3], v[10:13]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    v_bfe_i32 v10, v19, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x70
-; GCN-HSA-NEXT:    v_bfe_i32 v2, v7, 0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[16:19]
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v4, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v15, 31, v5
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v14, 16, v5
-; GCN-HSA-NEXT:    v_bfe_i32 v12, v5, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v7
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v4, 16, v7
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 64
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[2:5]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s12
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s13
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    s_add_u32 s0, s0, 0x50
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[8:11]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT:    s_add_u32 s0, s0, 64
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s15
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s17
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
 ; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s40
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s41
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s44
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s45
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s58
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s57
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s56
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s55
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s9
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[8:11]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[28:29], v[12:15]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
 ; GCN-NOHSA-VI-LABEL: global_sextload_v32i16_to_v32i64:
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
index b75c8c7e4177b..13b5f7546bb81 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
@@ -6273,43 +6273,44 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out,
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s10, v1
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s5, v0
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v0, 0, 8
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s6, s10, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s4, s5, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s5, 24
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s5, 8
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s8, s10, 8
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[10:11], 0x80000
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s15, s10, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s18, s10, 24
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[14:15], 0x80000
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s6, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s8, s5, 8
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s10, s5
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s4, 24
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s16, s4, 8
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[4:5], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s17, s5, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s20, s5, 24
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[16:17], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s18
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s15
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s16
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s17
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s6
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s7
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s12
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s13
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s10
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s11
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s9
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s20
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s17
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s10
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s11
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s18
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s19
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s9
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(1)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s14
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s15
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s5
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
 ; GCN-HSA-LABEL: global_sextload_v8i8_to_v8i64:
@@ -6328,53 +6329,55 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out,
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s1
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s0
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s6, v1
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s4, v0
-; GCN-HSA-NEXT:    s_lshr_b32 s2, s6, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s8, s4, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s10, s4, 24
-; GCN-HSA-NEXT:    s_lshr_b32 s12, s4, 8
-; GCN-HSA-NEXT:    s_lshr_b32 s4, s6, 8
-; GCN-HSA-NEXT:    s_ashr_i32 s13, s6, 31
-; GCN-HSA-NEXT:    v_bfe_i32 v0, v0, 0, 8
-; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[6:7], 0x80000
-; GCN-HSA-NEXT:    s_ashr_i32 s16, s6, 24
-; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[12:13], 0x80000
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s8, v0
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-HSA-NEXT:    s_lshr_b32 s2, s9, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s4, s9, 8
+; GCN-HSA-NEXT:    s_mov_b32 s6, s9
+; GCN-HSA-NEXT:    s_lshr_b32 s10, s8, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s12, s8, 24
+; GCN-HSA-NEXT:    s_lshr_b32 s14, s8, 8
+; GCN-HSA-NEXT:    s_ashr_i32 s15, s9, 31
+; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[8:9], 0x80000
+; GCN-HSA-NEXT:    s_ashr_i32 s18, s9, 24
+; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[14:15], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s6
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s7
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[0:3]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s16
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s13
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[4:7]
+; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s18
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s15
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[0:3]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s9
-; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s10
-; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s11
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s6
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s7
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s5
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    s_add_u32 s0, s0, 32
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
+; GCN-HSA-NEXT:    s_add_u32 s0, s0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[8:11]
 ; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s14
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s15
-; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s4
-; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s5
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s10
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s11
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s12
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s13
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[8:11]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s17
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s8
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s9
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[4:7]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
 ; GCN-NOHSA-VI-LABEL: global_sextload_v8i8_to_v8i64:
@@ -6388,46 +6391,48 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out,
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s8, s6
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s7, 0
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s1, s5
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s7
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s7, 0
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s0, s4
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s8, v1
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s5, v0
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s6, s8, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s4, s5, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s10, s5, 24
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s12, s5, 8
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s14, s8, 8
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s15, s8, 31
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s18, s8, 24
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s6, s5, 16
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s10, s5, 8
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s8, s5
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s12, s4, 16
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s14, s4, 24
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s16, s4, 8
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s17, s5, 31
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s20, s5, 24
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v0, v0, 0, 8
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[16:17], s[8:9], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[8:9], s[14:15], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[18:19], s[4:5], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[4:5], s[16:17], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, s18
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v7, s15
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s6
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s7
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v8, s16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v9, s17
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v12, s4
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v13, s5
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v14, s10
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v15, s11
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s12
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s13
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v10, s8
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v11, s9
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:48
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s20
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s17
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s18
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s19
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v8, s8
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v9, s9
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v10, s10
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v11, s11
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v12, s12
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v13, s13
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v14, s14
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v15, s15
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, s4
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v7, s5
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
 ; GCN-NOHSA-VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: global_sextload_v8i8_to_v8i64:
@@ -6939,94 +6944,93 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s6
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s13, 0
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s11, s13
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s7, s13
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s15, s13
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s17, s13
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s13
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s11, 0
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s13, s11
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s11
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s15, s11
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s17, s11
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s19, s11
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s4, v2
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s5, v3
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s18, v0
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s19, v1
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s20, s4, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s4, 24
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s4, 8
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s26, s18, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s28, s18, 24
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s30, s18, 8
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s5, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s10, s5, 8
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s6, s5
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s19, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s16, s19, 8
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s19
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[34:35], s[18:19], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[4:5], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s29, s19, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s31, s19, 24
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[6:7], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s5, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s38, s5, 24
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[16:17], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[6:7], s[14:15], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[30:31], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[28:29], 0x80000
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s20, v2
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s21, v3
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s10, s21, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s21, 8
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s21
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s20, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s20, 24
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s26, s20, 8
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s16, s5, 8
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s18, s5
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s28, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s30, s4, 24
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s34, s4, 8
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[6:7], s[4:5], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[20:21], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s29, s5, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s31, s5, 24
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[8:9], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s21, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s35, s21, 24
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[34:35], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[30:31], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[20:21], s[28:29], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s36
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s37
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s34
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s35
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s38
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s33
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s18
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s19
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s20
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s21
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s22
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s31
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s29
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s24
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s25
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s26
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s27
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s16
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s17
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s14
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s15
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s35
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s38
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s39
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s36
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s37
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s29
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s18
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s19
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s12
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s13
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s10
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s11
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s22
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s23
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s25
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s26
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s27
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s14
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s15
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, s16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, s17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s20
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s21
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s9
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s6
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s7
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s4
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
 ; GCN-HSA-LABEL: global_sextload_v16i8_to_v16i64:
@@ -7045,111 +7049,111 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out
 ; GCN-HSA-NEXT:    s_mov_b32 s9, s3
 ; GCN-HSA-NEXT:    s_mov_b32 s11, s3
 ; GCN-HSA-NEXT:    s_mov_b32 s13, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s1
-; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s0
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s14, v2
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s15, v3
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s16, v0
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s17, v1
-; GCN-HSA-NEXT:    s_lshr_b32 s18, s14, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s20, s14, 24
-; GCN-HSA-NEXT:    s_lshr_b32 s22, s14, 8
-; GCN-HSA-NEXT:    s_lshr_b32 s2, s15, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s4, s15, 8
-; GCN-HSA-NEXT:    s_mov_b32 s6, s15
-; GCN-HSA-NEXT:    s_ashr_i32 s8, s15, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s10, s15, 24
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s15, v1
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s16, v2
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s17, v3
+; GCN-HSA-NEXT:    s_lshr_b32 s2, s17, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s4, s17, 8
+; GCN-HSA-NEXT:    s_mov_b32 s6, s17
+; GCN-HSA-NEXT:    s_lshr_b32 s18, s16, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s20, s16, 24
+; GCN-HSA-NEXT:    s_lshr_b32 s22, s16, 8
+; GCN-HSA-NEXT:    s_ashr_i32 s19, s17, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s21, s17, 24
+; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[14:15], 0x80000
+; GCN-HSA-NEXT:    s_lshr_b32 s8, s15, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s10, s15, 8
+; GCN-HSA-NEXT:    s_mov_b32 s12, s15
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-HSA-NEXT:    s_lshr_b32 s16, s14, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s17, s15, 31
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s19
+; GCN-HSA-NEXT:    s_ashr_i32 s19, s15, 24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s24
+; GCN-HSA-NEXT:    s_lshr_b32 s24, s14, 24
+; GCN-HSA-NEXT:    s_lshr_b32 s14, s14, 8
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s21
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s19
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s17
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s25
 ; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s14
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s15
-; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[16:17], 0x80000
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s10
-; GCN-HSA-NEXT:    s_ashr_i32 s10, s17, 31
-; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s8
-; GCN-HSA-NEXT:    s_ashr_i32 s8, s17, 24
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s14
-; GCN-HSA-NEXT:    s_lshr_b32 s14, s16, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s10
-; GCN-HSA-NEXT:    s_lshr_b32 s24, s16, 24
-; GCN-HSA-NEXT:    s_lshr_b32 s8, s17, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s10, s17, 8
-; GCN-HSA-NEXT:    s_mov_b32 s12, s17
-; GCN-HSA-NEXT:    s_lshr_b32 s16, s16, 8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s15
+; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s17
-; GCN-HSA-NEXT:    s_add_u32 s16, s0, 0x50
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[8:9], v[2:5]
-; GCN-HSA-NEXT:    s_addc_u32 s17, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s18
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s19
-; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s20
-; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s21
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s17
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[12:15]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 64
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s2
+; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x80000
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x70
-; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s14
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s15
-; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s24
-; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s25
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s3
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s2
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s22
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s23
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[4:7]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[10:11], v[2:5]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x50
+; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 64
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[10:11], v[2:5]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s18
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s19
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s20
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s21
+; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[14:15], v[10:13]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s8
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s2
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s9
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT:    s_add_u32 s0, s0, 32
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[8:11]
-; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 32
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s9
+; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[4:7]
+; GCN-HSA-NEXT:    s_add_u32 s0, s0, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s12
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s13
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s10
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s11
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s2
+; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s22
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s23
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[12:15]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[10:11], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s25
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s15
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[8:11]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
 ; GCN-NOHSA-VI-LABEL: global_sextload_v16i8_to_v16i64:
@@ -7163,88 +7167,87 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s8, s6
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s17, 0
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s19, s17
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s21, s17
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s13, s17
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s0, s4
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s15, 0
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s1, s5
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s11, s15
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s15
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s7, s15
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s4, v2
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s12, v0
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s16, s4, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s18, s4, 24
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s5, v3
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s13, v1
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s20, s4, 8
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s22, s12, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s24, s12, 24
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s26, s12, 8
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s8, v2
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s16, s9, 16
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s22, s9, 8
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s18, s9
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s24, s8, 16
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s26, s8, 24
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s28, s8, 8
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s35, s9, 31
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s38, s9, 24
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s14, s5, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s28, s5, 8
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s10, s5
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s8, s13, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s30, s13, 8
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s6, s13
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[34:35], s[12:13], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[36:37], s[4:5], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s29, s13, 31
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s31, s13, 24
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s20, s5, 16
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s30, s5, 8
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s12, s5
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s10, s4, 16
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s14, s4, 24
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s34, s4, 8
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[36:37], s[8:9], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s31, s5, 31
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s38
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s35
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[6:7], s[4:5], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s33, s5, 24
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[4:5], s[34:35], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[8:9], s[14:15], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[14:15], s[12:13], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[12:13], s[30:31], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, s16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v7, s17
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v8, s18
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v9, s19
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s33, s5, 31
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s38, s5, 24
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[4:5], s[30:31], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[12:13], s[10:11], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[10:11], s[28:29], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s36
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s37
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s34
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s35
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s20
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s21
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v4, s36
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, s37
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v14, s18
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v15, s19
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v16, s22
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v17, s23
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v18, s24
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v19, s25
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, s26
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v7, s27
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v10, s38
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v11, s33
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v14, s31
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v15, s29
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v8, s14
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v9, s15
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v12, s8
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s12
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s13
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s10
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s11
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v13, s9
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT:    s_nop 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v16, s6
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v17, s7
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v18, s4
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v19, s5
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, s28
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s24
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s25
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s26
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s27
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v7, s29
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v10, s33
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v11, s31
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v12, s6
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v13, s7
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v8, s20
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v9, s21
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v18, s12
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v16, s14
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v17, s15
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v19, s13
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v14, s4
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s9
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v15, s5
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0
 ; GCN-NOHSA-VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: global_sextload_v16i8_to_v16i64:
@@ -8195,184 +8198,183 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s8, s6
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:16
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s7, 0
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s23, 0
 ; GCN-NOHSA-SI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[8:11], 0
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s37, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s11, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s13, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s17, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s15, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s19, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s21, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s23, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s25, s7
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s27, s7
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s25, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s37, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s7, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s9, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s41, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s11, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s13, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s15, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s17, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s19, s23
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s21, s23
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s28, v2
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s29, v3
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s34, v0
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s35, v1
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s38, v0
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s39, v1
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s42, v2
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s43, v3
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s30, v6
-; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s31, v7
 ; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s4, v4
 ; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s5, v5
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s8, s28, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s6, s28, 24
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[28:29], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[40:41], s[34:35], 0x80000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s38
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s39
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[30:31], 0x80000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s40
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s41
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[40:41], s[4:5], 0x80000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s38
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s39
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s10, s28, 8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s40
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s41
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s34, 16
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[8:9], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[40:41], s[6:7], 0x80000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s38
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s39
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s34, 24
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s40
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s41
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s26, s34, 8
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s36, s29
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[10:11], 0x80000
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:208
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s36
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s37
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s28, s30, 16
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s26, v6
+; GCN-NOHSA-SI-NEXT:    v_readfirstlane_b32 s27, v7
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s43, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s43, 8
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s36, s43
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s34, s42, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s30, s42, 24
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s28, s42, 8
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s6, s39, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s8, s39, 8
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s40, s39
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[44:45], s[36:37], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s36, s38, 16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s44
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s45
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s12, s43, 31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s14, s43, 24
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[44:45], s[40:41], 0x80000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s42
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s43
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s40, s38, 24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s44
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s45
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s42, s38, 8
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s10, s27, 16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s14
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s31, s39, 31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s12
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s39, 24
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s27, 8
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s14, s27
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s38
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s39
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s34, s30, 24
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s6, s29, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s8, s29, 8
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s10, s35, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s12, s35, 8
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s16, s35
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s14, s31, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s18, s31, 8
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s20, s31
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[22:23], 0x80000
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:192
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s38, s26, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s22
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s23
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s44, s26, 24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s25
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s26, 8
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s16, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s18, s5, 8
+; GCN-NOHSA-SI-NEXT:    s_mov_b32 s20, s5
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[34:35], 0x80000
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s25
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s34, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[30:31], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, s24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, s25
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s30, s4, 24
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s36
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s37
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s31, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s41, s35, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s44, s35, 24
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s35, s29, 31
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s29, s29, 24
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s45, s31, 24
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s30, s30, 8
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[24:25], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[26:27], 0x80000
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s36
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s37
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s36, s4, 16
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s38
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s39
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s38, s4, 24
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s22, s5, 16
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s24, s5, 8
-; GCN-NOHSA-SI-NEXT:    s_mov_b32 s26, s5
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:144
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s28
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s29
+; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s28, s4, 8
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:224
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s29
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s39, s5, 31
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s35
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s46, s5, 24
-; GCN-NOHSA-SI-NEXT:    s_lshr_b32 s40, s4, 8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s33
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s23, s27, 31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s31
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s24, s27, 24
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s24
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s33, s5, 31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s23
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s41, s5, 24
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[4:5], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[42:43], s[16:17], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[24:25], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[22:23], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[46:47], s[20:21], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[48:49], s[14:15], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[4:5], s[28:29], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[30:31], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[20:21], s[34:35], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[28:29], s[44:45], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[30:31], s[38:39], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[34:35], s[42:43], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[38:39], s[40:41], 0x80000
+; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
 ; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[22:23], s[40:41], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[24:25], s[38:39], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GCN-NOHSA-SI-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s28
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s29
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s34
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s35
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s44
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s41
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s30
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s31
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:192
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s36
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s37
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s24
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s25
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s48
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s49
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:176
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s42
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s43
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s22
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s23
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v4, s26
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v5, s27
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s9
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:160
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s45
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s33
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s6
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s7
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s36
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s37
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s38
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s39
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s20
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s21
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v16, s8
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, s9
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:224
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s41
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s34
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s35
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s46
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s39
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s46
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s47
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s10
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s11
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s26
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s27
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s12
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s13
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:160
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, s24
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v11, s25
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s12
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s13
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:96
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s14
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s15
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s18
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s19
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s30
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s31
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v8, s28
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v9, s29
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:80
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s16
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s17
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v6, s22
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v7, s23
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v14, s18
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v15, s19
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(1)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s20
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s21
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s14
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s15
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(1)
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v12, s4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v13, s5
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[10:13], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
 ; GCN-HSA-LABEL: global_sextload_v32i8_to_v32i64:
@@ -8381,6 +8383,7 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out
 ; GCN-HSA-NEXT:    s_add_i32 s12, s12, s17
 ; GCN-HSA-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; GCN-HSA-NEXT:    s_mov_b32 flat_scratch_lo, s13
+; GCN-HSA-NEXT:    s_mov_b32 s5, 0
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
@@ -8390,227 +8393,232 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
-; GCN-HSA-NEXT:    s_mov_b32 s3, 0
-; GCN-HSA-NEXT:    s_mov_b32 s19, s3
-; GCN-HSA-NEXT:    s_mov_b32 s5, s3
-; GCN-HSA-NEXT:    s_mov_b32 s7, s3
-; GCN-HSA-NEXT:    s_mov_b32 s29, s3
-; GCN-HSA-NEXT:    s_mov_b32 s15, s3
-; GCN-HSA-NEXT:    s_mov_b32 s31, s3
-; GCN-HSA-NEXT:    s_mov_b32 s13, s3
-; GCN-HSA-NEXT:    s_mov_b32 s9, s3
-; GCN-HSA-NEXT:    s_mov_b32 s11, s3
+; GCN-HSA-NEXT:    s_mov_b32 s9, s5
+; GCN-HSA-NEXT:    s_mov_b32 s3, s5
+; GCN-HSA-NEXT:    s_mov_b32 s7, s5
+; GCN-HSA-NEXT:    s_mov_b32 s11, s5
+; GCN-HSA-NEXT:    s_mov_b32 s13, s5
+; GCN-HSA-NEXT:    s_mov_b32 s31, s5
+; GCN-HSA-NEXT:    s_mov_b32 s27, s5
+; GCN-HSA-NEXT:    s_mov_b32 s37, s5
+; GCN-HSA-NEXT:    s_mov_b32 s15, s5
+; GCN-HSA-NEXT:    s_mov_b32 s19, s5
+; GCN-HSA-NEXT:    s_mov_b32 s23, s5
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(1)
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s16, v6
 ; GCN-HSA-NEXT:    v_readfirstlane_b32 s17, v7
 ; GCN-HSA-NEXT:    v_readfirstlane_b32 s20, v4
 ; GCN-HSA-NEXT:    v_readfirstlane_b32 s21, v5
-; GCN-HSA-NEXT:    s_lshr_b32 s18, s16, 24
-; GCN-HSA-NEXT:    s_lshr_b32 s14, s16, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s26, s20, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s28, s20, 24
-; GCN-HSA-NEXT:    s_lshr_b32 s30, s20, 8
-; GCN-HSA-NEXT:    s_lshr_b32 s4, s17, 8
-; GCN-HSA-NEXT:    s_mov_b32 s6, s17
-; GCN-HSA-NEXT:    s_lshr_b32 s12, s21, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s8, s21, 8
-; GCN-HSA-NEXT:    s_mov_b32 s10, s21
-; GCN-HSA-NEXT:    s_ashr_i32 s37, s21, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s38, s21, 24
-; GCN-HSA-NEXT:    s_bfe_i64 s[44:45], s[20:21], 0x80000
-; GCN-HSA-NEXT:    s_lshr_b32 s34, s16, 8
-; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[18:19], 0x80000
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s16, v6
+; GCN-HSA-NEXT:    s_lshr_b32 s4, s17, 16
+; GCN-HSA-NEXT:    s_mov_b32 s8, s17
+; GCN-HSA-NEXT:    s_lshr_b32 s2, s17, 8
+; GCN-HSA-NEXT:    s_lshr_b32 s6, s21, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s14, s20, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s18, s20, 24
+; GCN-HSA-NEXT:    s_lshr_b32 s22, s20, 8
+; GCN-HSA-NEXT:    s_ashr_i32 s63, s17, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s64, s17, 24
+; GCN-HSA-NEXT:    s_lshr_b32 s24, s16, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s26, s16, 24
+; GCN-HSA-NEXT:    s_lshr_b32 s30, s16, 8
+; GCN-HSA-NEXT:    s_bfe_i64 s[46:47], s[16:17], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s16, v0
+; GCN-HSA-NEXT:    v_readfirstlane_b32 s17, v1
 ; GCN-HSA-NEXT:    v_readfirstlane_b32 s48, v2
 ; GCN-HSA-NEXT:    v_readfirstlane_b32 s49, v3
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s50, v0
-; GCN-HSA-NEXT:    v_readfirstlane_b32 s51, v1
-; GCN-HSA-NEXT:    s_lshr_b32 s2, s17, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s41, s17, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s42, s17, 24
-; GCN-HSA-NEXT:    s_bfe_i64 s[46:47], s[16:17], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[14:15], 0x80000
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s20
-; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s21
-; GCN-HSA-NEXT:    s_bfe_i64 s[22:23], s[6:7], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[4:5], 0x80000
+; GCN-HSA-NEXT:    s_lshr_b32 s10, s21, 8
+; GCN-HSA-NEXT:    s_mov_b32 s12, s21
+; GCN-HSA-NEXT:    s_ashr_i32 s61, s21, 24
+; GCN-HSA-NEXT:    s_bfe_i64 s[28:29], s[22:23], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[34:35], s[18:19], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[38:39], s[14:15], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[50:51], s[6:7], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[52:53], s[30:31], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[54:55], s[28:29], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[56:57], s[26:27], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[58:59], s[34:35], 0x80000
-; GCN-HSA-NEXT:    s_lshr_b32 s18, s49, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s28, s49, 8
-; GCN-HSA-NEXT:    s_mov_b32 s14, s49
-; GCN-HSA-NEXT:    s_lshr_b32 s30, s51, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s6, s51, 8
-; GCN-HSA-NEXT:    s_mov_b32 s4, s51
-; GCN-HSA-NEXT:    s_lshr_b32 s34, s50, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s60, s50, 24
-; GCN-HSA-NEXT:    s_lshr_b32 s62, s50, 8
-; GCN-HSA-NEXT:    s_bfe_i64 s[26:27], s[50:51], 0x80000
-; GCN-HSA-NEXT:    s_lshr_b32 s50, s48, 16
-; GCN-HSA-NEXT:    s_lshr_b32 s64, s48, 24
-; GCN-HSA-NEXT:    s_lshr_b32 s66, s48, 8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s17
-; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[12:13], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[2:3], 0x80000
-; GCN-HSA-NEXT:    s_ashr_i32 s33, s51, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s36, s51, 24
-; GCN-HSA-NEXT:    s_ashr_i32 s39, s49, 31
-; GCN-HSA-NEXT:    s_ashr_i32 s40, s49, 24
-; GCN-HSA-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[54:55], s[26:27], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[56:57], s[24:25], 0x80000
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-HSA-NEXT:    s_lshr_b32 s30, s49, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s26, s49, 8
+; GCN-HSA-NEXT:    s_mov_b32 s36, s49
+; GCN-HSA-NEXT:    s_lshr_b32 s14, s17, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s18, s17, 8
+; GCN-HSA-NEXT:    s_mov_b32 s22, s17
+; GCN-HSA-NEXT:    s_lshr_b32 s8, s16, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s6, s16, 24
+; GCN-HSA-NEXT:    s_lshr_b32 s4, s16, 8
+; GCN-HSA-NEXT:    s_lshr_b32 s24, s48, 16
+; GCN-HSA-NEXT:    s_lshr_b32 s60, s48, 24
+; GCN-HSA-NEXT:    s_lshr_b32 s62, s48, 8
+; GCN-HSA-NEXT:    s_ashr_i32 s45, s21, 31
+; GCN-HSA-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[42:43], s[12:13], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[40:41], s[10:11], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[58:59], s[2:3], 0x80000
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-HSA-NEXT:    s_ashr_i32 s33, s17, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s44, s17, 24
+; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[16:17], 0x80000
+; GCN-HSA-NEXT:    s_ashr_i32 s65, s49, 31
+; GCN-HSA-NEXT:    s_ashr_i32 s66, s49, 24
+; GCN-HSA-NEXT:    s_bfe_i64 s[16:17], s[48:49], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[2:3], s[6:7], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[30:31], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[22:23], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[10:11], s[18:19], 0x80000
 ; GCN-HSA-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[12:13], s[28:29], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[28:29], s[62:63], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[30:31], s[60:61], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[60:61], s[66:67], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[62:63], s[64:65], 0x80000
-; GCN-HSA-NEXT:    s_bfe_i64 s[50:51], s[50:51], 0x80000
-; GCN-HSA-NEXT:    s_add_u32 s64, s0, 0x50
-; GCN-HSA-NEXT:    s_addc_u32 s65, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s46
+; GCN-HSA-NEXT:    s_bfe_i64 s[18:19], s[62:63], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[22:23], s[60:61], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GCN-HSA-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
+; GCN-HSA-NEXT:    s_add_u32 s48, s0, 0x70
+; GCN-HSA-NEXT:    s_addc_u32 s49, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s49
+; GCN-HSA-NEXT:    s_add_u32 s48, s0, 0x60
+; GCN-HSA-NEXT:    s_addc_u32 s49, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s48
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s49
+; GCN-HSA-NEXT:    s_add_u32 s48, s0, 0x50
+; GCN-HSA-NEXT:    s_addc_u32 s49, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s46
 ; GCN-HSA-NEXT:    s_add_u32 s46, s0, 64
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s47
-; GCN-HSA-NEXT:    s_addc_u32 s47, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s46
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s64
-; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s47
-; GCN-HSA-NEXT:    s_add_u32 s46, s0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s65
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s47
 ; GCN-HSA-NEXT:    s_addc_u32 s47, s1, 0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[4:7]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s42
+; GCN-HSA-NEXT:    s_add_u32 s42, s0, 48
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s58
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s34
-; GCN-HSA-NEXT:    s_add_u32 s34, s0, 0xd0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s35
-; GCN-HSA-NEXT:    s_addc_u32 s35, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s34
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s35
-; GCN-HSA-NEXT:    s_add_u32 s34, s0, 0xc0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s59
-; GCN-HSA-NEXT:    s_addc_u32 s35, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s43
+; GCN-HSA-NEXT:    s_addc_u32 s43, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[0:3]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s64
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s38
+; GCN-HSA-NEXT:    s_add_u32 s38, s0, 32
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s63
+; GCN-HSA-NEXT:    v_mov_b32_e32 v30, s42
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s39
+; GCN-HSA-NEXT:    s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s50
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s51
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[4:7]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v31, s43
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s61
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s45
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s20
+; GCN-HSA-NEXT:    s_add_u32 s20, s0, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s21
+; GCN-HSA-NEXT:    s_addc_u32 s21, s1, 0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[30:31], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s38
+; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s20
+; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s21
+; GCN-HSA-NEXT:    s_add_u32 s20, s0, 0xf0
+; GCN-HSA-NEXT:    s_addc_u32 s21, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s20
+; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s40
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s41
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s39
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s34
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s35
+; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s21
+; GCN-HSA-NEXT:    s_add_u32 s20, s0, 0xe0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s48
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v28, s46
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[0:3]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[20:23]
+; GCN-HSA-NEXT:    s_addc_u32 s21, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s20
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s56
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s26
-; GCN-HSA-NEXT:    s_add_u32 s26, s0, 0x90
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s57
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s54
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s55
-; GCN-HSA-NEXT:    v_mov_b32_e32 v29, s47
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s27
-; GCN-HSA-NEXT:    s_addc_u32 s27, s1, 0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[28:29], v[8:11]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v29, s27
-; GCN-HSA-NEXT:    v_mov_b32_e32 v28, s26
-; GCN-HSA-NEXT:    s_add_u32 s26, s0, 0x80
-; GCN-HSA-NEXT:    s_addc_u32 s27, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s24
-; GCN-HSA-NEXT:    s_add_u32 s24, s0, 0x70
-; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s44
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s45
+; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s49
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s52
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s53
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s25
-; GCN-HSA-NEXT:    s_addc_u32 s25, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s30
-; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s31
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[2:3], v[12:15]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v31, s27
-; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s20
-; GCN-HSA-NEXT:    s_add_u32 s20, s0, 0x60
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s28
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s29
-; GCN-HSA-NEXT:    v_mov_b32_e32 v30, s26
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[28:29], v[4:7]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s21
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v29, s47
+; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s28
+; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s29
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s21
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-HSA-NEXT:    s_add_u32 s20, s0, 0xd0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[8:11]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[28:29], v[12:15]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v8, s30
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s31
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s66
+; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s65
+; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s36
+; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s37
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s26
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s27
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[4:7]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[8:11]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[12:15]
 ; GCN-HSA-NEXT:    s_addc_u32 s21, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s20
-; GCN-HSA-NEXT:    v_mov_b32_e32 v12, s22
-; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s42
-; GCN-HSA-NEXT:    v_mov_b32_e32 v11, s41
-; GCN-HSA-NEXT:    v_mov_b32_e32 v13, s23
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s25
-; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s21
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[30:31], v[0:3]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[8:11]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[6:7], v[12:15]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s20
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s24
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s25
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s22
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s23
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s21
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    s_nop 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s16
-; GCN-HSA-NEXT:    s_add_u32 s16, s0, 48
+; GCN-HSA-NEXT:    s_add_u32 s16, s0, 0xc0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s17
 ; GCN-HSA-NEXT:    s_addc_u32 s17, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s38
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s37
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s18
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s19
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s17
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s50
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s8
-; GCN-HSA-NEXT:    s_add_u32 s8, s0, 32
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s9
-; GCN-HSA-NEXT:    s_addc_u32 s9, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s9
-; GCN-HSA-NEXT:    s_add_u32 s8, s0, 0xf0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT:    s_addc_u32 s9, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s18
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s19
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s40
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s39
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s9
-; GCN-HSA-NEXT:    s_add_u32 s8, s0, 0xe0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT:    s_addc_u32 s9, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s8
+; GCN-HSA-NEXT:    s_nop 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-HSA-NEXT:    s_add_u32 s14, s0, 0xb0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s15
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s12
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s13
-; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s9
+; GCN-HSA-NEXT:    s_addc_u32 s15, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s44
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s15
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s51
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-HSA-NEXT:    s_add_u32 s6, s0, 0xb0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-HSA-NEXT:    s_nop 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s10
+; GCN-HSA-NEXT:    s_add_u32 s10, s0, 0xa0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s11
+; GCN-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s10
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s11
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT:    s_nop 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-HSA-NEXT:    s_add_u32 s6, s0, 0x90
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s7
 ; GCN-HSA-NEXT:    s_addc_u32 s7, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT:    s_add_u32 s0, s0, 0xa0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s36
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-HSA-NEXT:    s_add_u32 s0, s0, 0x80
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s9
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s7
 ; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s1
-; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s62
-; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s63
-; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s34
-; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s5
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s48
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s49
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s60
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s61
-; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s35
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[16:19]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[20:23]
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
@@ -8626,168 +8634,168 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s7
 ; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:16
 ; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[8:11], 0
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s17, 0
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s23, s17
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s27, s17
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s31, s17
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s37, s17
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s41, s17
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s25, s17
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s15, s17
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s1, s5
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s31, 0
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s25, s31
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s23, s31
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s17, s31
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s15, s31
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s11, s31
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s31
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s7, s31
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s4, v2
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s40, s4, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s42, s4, 24
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s5, v3
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s12, v0
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s44, s4, 8
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[42:43], s[42:43], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[40:41], s[40:41], 0x80000
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s13, v1
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s46, s12, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s48, s12, 24
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[70:71], s[4:5], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x80000
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s40
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s41
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s42
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s43
-; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s18, v6
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s50, s12, 8
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s11, v3
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s16, s11, 16
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s10, v2
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s46, s11, 8
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s22, s11
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s71, s11, 31
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s72, s11, 24
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s8, v0
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s48, s10, 16
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s50, s10, 24
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[46:47], s[46:47], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:208
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s52, s18, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s70
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s71
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s44
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s45
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s54, s18, 24
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[34:35], s[12:13], 0x80000
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s72
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s17
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s71
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s52, s10, 8
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[50:51], s[50:51], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s19, v7
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s46
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s47
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s48
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s49
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s26, v4
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s56, s18, 8
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[54:55], s[54:55], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s26, s9, 16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s22
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s23
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s46
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s47
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[42:43], s[10:11], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[52:53], s[52:53], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s58, s26, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s34
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s35
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s54, s9, 8
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s48
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s49
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s50
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s51
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s60, s26, 24
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[28:29], s[18:19], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s30, s9
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s69, s9, 31
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s70, s9, 24
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:208
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s56, s8, 16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s42
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s43
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s52
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s53
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s58, s8, 24
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[54:55], s[54:55], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192
+; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(4)
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s19, v7
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s26
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s27
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s70
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s69
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s60, s8, 8
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[58:59], s[58:59], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[56:57], s[56:57], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128
-; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s27, v5
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s52
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s53
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s13, v5
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s30
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s31
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s54
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s55
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s36, s26, 8
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s36, s19, 16
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[20:21], s[8:9], 0x80000
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[60:61], s[60:61], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[58:59], s[58:59], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s30, s5, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s28
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s29
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s56
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s57
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[20:21], s[26:27], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:160
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s18, v6
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s56
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s57
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s58
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s59
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s62, s19, 8
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s40, s19
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s44, s13, 8
+; GCN-NOHSA-VI-NEXT:    s_mov_b32 s14, s13
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s63, s13, 24
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s65, s19, 31
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s67, s19, 24
 ; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[36:37], s[36:37], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s62, s5, 8
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s58
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s59
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s60
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s61
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s24, s5
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s63, s19, 24
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s69, s5, 31
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s72, s5, 24
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s22, s13, 16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s38, s18, 16
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s20
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s21
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s36
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s37
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s8, s27, 16
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s68, s27, 8
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s6, s27
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s33, s27, 31
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s38, s27, 24
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[26:27], s[24:25], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[24:25], s[62:63], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s64, s13, 8
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s30
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s31
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s72
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s69
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s16, s13
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s65, s13, 31
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s67, s13, 24
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s14, s19, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s26
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s27
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s24
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s25
-; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s66, s19, 8
-; GCN-NOHSA-VI-NEXT:    s_mov_b32 s10, s19
-; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s39, s19, 31
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[18:19], s[16:17], 0x80000
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[16:17], s[64:65], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s22
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s23
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s60
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s61
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s34, s18, 24
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s28, s18, 8
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[6:7], s[18:19], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[18:19], s[14:15], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[14:15], s[44:45], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[44:45], s[40:41], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[40:41], s[62:63], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x80000
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s36
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s37
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s67
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s65
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[12:13], s[10:11], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[38:39], s[38:39], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s24, s13, 16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s44
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s45
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s40
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s41
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT:    v_readfirstlane_b32 s12, v4
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s38
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s39
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s34
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s35
+; GCN-NOHSA-VI-NEXT:    s_ashr_i32 s33, s13, 31
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s64, s12, 16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s28
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s29
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s66, s12, 24
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
+; GCN-NOHSA-VI-NEXT:    s_lshr_b32 s68, s12, 8
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s24
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s25
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s63
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s33
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[4:5], s[12:13], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[10:11], s[66:67], 0x80000
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[12:13], s[64:65], 0x80000
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[8:9], s[68:69], 0x80000
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s18
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s19
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s17
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[10:11], s[66:67], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:160
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s14
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s15
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s63
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s39
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x80000
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s14
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s15
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT:    s_nop 0
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s12
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s13
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s10
 ; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s11
-; GCN-NOHSA-VI-NEXT:    s_bfe_i64 s[4:5], s[68:69], 0x80000
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
-; GCN-NOHSA-VI-NEXT:    s_nop 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s38
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s33
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-VI-NEXT:    s_nop 0
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v3, s9
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NOHSA-VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: global_sextload_v32i8_to_v32i64:
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
index 04d906ca6ad9c..1572ceae9e40a 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
@@ -6137,8 +6137,8 @@ define amdgpu_kernel void @local_sextload_v8i16_to_v8i64(ptr addrspace(3) %out,
 ; SI-NEXT:    ds_read2_b64 v[0:3], v0 offset1:1
 ; SI-NEXT:    v_mov_b32_e32 v16, s0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_lshrrev_b32_e32 v11, 16, v2
-; SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
+; SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v2
+; SI-NEXT:    v_lshrrev_b32_e32 v11, 16, v0
 ; SI-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
 ; SI-NEXT:    v_ashrrev_i32_e32 v4, 16, v1
 ; SI-NEXT:    v_ashrrev_i32_e32 v7, 31, v3
@@ -6147,11 +6147,11 @@ define amdgpu_kernel void @local_sextload_v8i16_to_v8i64(ptr addrspace(3) %out,
 ; SI-NEXT:    v_bfe_i32 v8, v1, 0, 16
 ; SI-NEXT:    v_bfe_i32 v2, v2, 0, 16
 ; SI-NEXT:    v_bfe_i32 v10, v3, 0, 16
+; SI-NEXT:    v_bfe_i32 v12, v11, 0, 16
+; SI-NEXT:    v_bfe_i32 v14, v9, 0, 16
 ; SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; SI-NEXT:    v_bfe_i32 v12, v9, 0, 16
 ; SI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
 ; SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; SI-NEXT:    v_bfe_i32 v14, v11, 0, 16
 ; SI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
 ; SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; SI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
@@ -6833,24 +6833,24 @@ define amdgpu_kernel void @local_sextload_v16i16_to_v16i64(ptr addrspace(3) %out
 ; SI-NEXT:    v_bfe_i32 v1, v4, 0, 16
 ; SI-NEXT:    v_bfe_i32 v3, v5, 0, 16
 ; SI-NEXT:    v_bfe_i32 v5, v6, 0, 16
-; SI-NEXT:    v_bfe_i32 v10, v0, 0, 16
-; SI-NEXT:    v_bfe_i32 v7, v2, 0, 16
+; SI-NEXT:    v_bfe_i32 v7, v0, 0, 16
+; SI-NEXT:    v_bfe_i32 v10, v2, 0, 16
 ; SI-NEXT:    v_bfe_i32 v12, v17, 0, 16
-; SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; SI-NEXT:    v_bfe_i32 v14, v14, 0, 16
-; SI-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
 ; SI-NEXT:    v_bfe_i32 v16, v16, 0, 16
-; SI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
+; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; SI-NEXT:    ds_write2_b64 v18, v[3:4], v[8:9] offset0:2 offset1:3
 ; SI-NEXT:    v_bfe_i32 v3, v15, 0, 16
+; SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; SI-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
 ; SI-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; SI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
 ; SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; SI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
 ; SI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
 ; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; SI-NEXT:    ds_write2_b64 v18, v[7:8], v[3:4] offset0:12 offset1:13
-; SI-NEXT:    ds_write2_b64 v18, v[10:11], v[16:17] offset0:8 offset1:9
+; SI-NEXT:    ds_write2_b64 v18, v[10:11], v[3:4] offset0:12 offset1:13
+; SI-NEXT:    ds_write2_b64 v18, v[7:8], v[16:17] offset0:8 offset1:9
 ; SI-NEXT:    ds_write2_b64 v18, v[5:6], v[14:15] offset0:4 offset1:5
 ; SI-NEXT:    ds_write2_b64 v18, v[1:2], v[12:13] offset1:1
 ; SI-NEXT:    s_endpgm
@@ -8090,16 +8090,16 @@ define amdgpu_kernel void @local_sextload_v32i16_to_v32i64(ptr addrspace(3) %out
 ; SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; SI-NEXT:    ds_write2_b64 v7, v[10:11], v[12:13] offset0:4 offset1:5
 ; SI-NEXT:    v_bfe_i32 v11, v6, 0, 16
-; SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; SI-NEXT:    v_bfe_i32 v13, v4, 0, 16
-; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; SI-NEXT:    v_bfe_i32 v15, v15, 0, 16
-; SI-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
 ; SI-NEXT:    v_bfe_i32 v16, v14, 0, 16
 ; SI-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
 ; SI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
 ; SI-NEXT:    ds_write2_b64 v7, v[9:10], v[16:17] offset1:1
 ; SI-NEXT:    v_bfe_i32 v17, v18, 0, 16
+; SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; SI-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
 ; SI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
 ; SI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
 ; SI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index bf6dba48cb921..0161b13839d79 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -4732,52 +4732,68 @@ define amdgpu_kernel void @srem_v2i64_4(ptr addrspace(1) %out, ptr addrspace(1)
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    global_load_dwordx4 v[0:3], v4, s[2:3]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
-; GCN-NEXT:    v_lshrrev_b32_e32 v5, 30, v5
-; GCN-NEXT:    v_ashrrev_i32_e32 v6, 31, v3
-; GCN-NEXT:    v_add_co_u32_e32 v5, vcc, v0, v5
-; GCN-NEXT:    v_lshrrev_b32_e32 v6, 30, v6
-; GCN-NEXT:    v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v6, vcc, v2, v6
-; GCN-NEXT:    v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
-; GCN-NEXT:    v_and_b32_e32 v5, -4, v5
-; GCN-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v5
-; GCN-NEXT:    v_and_b32_e32 v6, -4, v6
-; GCN-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v7, vcc
-; GCN-NEXT:    v_sub_co_u32_e32 v2, vcc, v2, v6
-; GCN-NEXT:    v_subb_co_u32_e32 v3, vcc, v3, v8, vcc
+; GCN-NEXT:    v_readfirstlane_b32 s3, v1
+; GCN-NEXT:    s_ashr_i32 s6, s3, 31
+; GCN-NEXT:    v_readfirstlane_b32 s2, v0
+; GCN-NEXT:    s_lshr_b32 s6, s6, 30
+; GCN-NEXT:    s_add_u32 s6, s2, s6
+; GCN-NEXT:    s_addc_u32 s7, s3, 0
+; GCN-NEXT:    s_and_b32 s6, s6, -4
+; GCN-NEXT:    v_readfirstlane_b32 s5, v3
+; GCN-NEXT:    s_sub_u32 s2, s2, s6
+; GCN-NEXT:    s_subb_u32 s3, s3, s7
+; GCN-NEXT:    s_ashr_i32 s6, s5, 31
+; GCN-NEXT:    v_readfirstlane_b32 s4, v2
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    s_lshr_b32 s2, s6, 30
+; GCN-NEXT:    s_add_u32 s2, s4, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    s_addc_u32 s3, s5, 0
+; GCN-NEXT:    s_and_b32 s2, s2, -4
+; GCN-NEXT:    s_sub_u32 s2, s4, s2
+; GCN-NEXT:    s_subb_u32 s3, s5, s3
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
 ; GCN-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
 ; GCN-NEXT:    s_endpgm
 ;
 ; TAHITI-LABEL: srem_v2i64_4:
 ; TAHITI:       ; %bb.0:
-; TAHITI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; TAHITI-NEXT:    s_mov_b32 s7, 0xf000
-; TAHITI-NEXT:    s_mov_b32 s6, -1
-; TAHITI-NEXT:    s_mov_b32 s10, s6
-; TAHITI-NEXT:    s_mov_b32 s11, s7
+; TAHITI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
+; TAHITI-NEXT:    s_mov_b32 s3, 0xf000
+; TAHITI-NEXT:    s_mov_b32 s2, -1
+; TAHITI-NEXT:    s_mov_b32 s10, s2
+; TAHITI-NEXT:    s_mov_b32 s11, s3
 ; TAHITI-NEXT:    s_waitcnt lgkmcnt(0)
-; TAHITI-NEXT:    s_mov_b32 s8, s2
-; TAHITI-NEXT:    s_mov_b32 s9, s3
+; TAHITI-NEXT:    s_mov_b32 s8, s6
+; TAHITI-NEXT:    s_mov_b32 s9, s7
 ; TAHITI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; TAHITI-NEXT:    s_mov_b32 s4, s0
-; TAHITI-NEXT:    s_mov_b32 s5, s1
+; TAHITI-NEXT:    s_mov_b32 s1, s5
+; TAHITI-NEXT:    s_mov_b32 s0, s4
 ; TAHITI-NEXT:    s_waitcnt vmcnt(0)
-; TAHITI-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
-; TAHITI-NEXT:    v_lshrrev_b32_e32 v4, 30, v4
-; TAHITI-NEXT:    v_ashrrev_i32_e32 v5, 31, v3
-; TAHITI-NEXT:    v_add_i32_e32 v4, vcc, v0, v4
-; TAHITI-NEXT:    v_lshrrev_b32_e32 v5, 30, v5
-; TAHITI-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
-; TAHITI-NEXT:    v_add_i32_e32 v5, vcc, v2, v5
-; TAHITI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; TAHITI-NEXT:    v_and_b32_e32 v4, -4, v4
-; TAHITI-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
-; TAHITI-NEXT:    v_and_b32_e32 v5, -4, v5
-; TAHITI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v6, vcc
-; TAHITI-NEXT:    v_sub_i32_e32 v2, vcc, v2, v5
-; TAHITI-NEXT:    v_subb_u32_e32 v3, vcc, v3, v7, vcc
-; TAHITI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; TAHITI-NEXT:    v_readfirstlane_b32 s5, v1
+; TAHITI-NEXT:    s_ashr_i32 s8, s5, 31
+; TAHITI-NEXT:    v_readfirstlane_b32 s4, v0
+; TAHITI-NEXT:    s_lshr_b32 s8, s8, 30
+; TAHITI-NEXT:    s_add_u32 s8, s4, s8
+; TAHITI-NEXT:    s_addc_u32 s9, s5, 0
+; TAHITI-NEXT:    s_and_b32 s8, s8, -4
+; TAHITI-NEXT:    v_readfirstlane_b32 s7, v3
+; TAHITI-NEXT:    s_sub_u32 s4, s4, s8
+; TAHITI-NEXT:    s_subb_u32 s5, s5, s9
+; TAHITI-NEXT:    s_ashr_i32 s8, s7, 31
+; TAHITI-NEXT:    v_readfirstlane_b32 s6, v2
+; TAHITI-NEXT:    v_mov_b32_e32 v0, s4
+; TAHITI-NEXT:    s_lshr_b32 s4, s8, 30
+; TAHITI-NEXT:    s_add_u32 s4, s6, s4
+; TAHITI-NEXT:    v_mov_b32_e32 v1, s5
+; TAHITI-NEXT:    s_addc_u32 s5, s7, 0
+; TAHITI-NEXT:    s_and_b32 s4, s4, -4
+; TAHITI-NEXT:    s_sub_u32 s4, s6, s4
+; TAHITI-NEXT:    s_subb_u32 s5, s7, s5
+; TAHITI-NEXT:    v_mov_b32_e32 v2, s4
+; TAHITI-NEXT:    v_mov_b32_e32 v3, s5
+; TAHITI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; TAHITI-NEXT:    s_endpgm
 ;
 ; TONGA-LABEL: srem_v2i64_4:
@@ -4787,23 +4803,31 @@ define amdgpu_kernel void @srem_v2i64_4(ptr addrspace(1) %out, ptr addrspace(1)
 ; TONGA-NEXT:    v_mov_b32_e32 v0, s2
 ; TONGA-NEXT:    v_mov_b32_e32 v1, s3
 ; TONGA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
-; TONGA-NEXT:    v_mov_b32_e32 v4, s0
 ; TONGA-NEXT:    v_mov_b32_e32 v5, s1
+; TONGA-NEXT:    v_mov_b32_e32 v4, s0
 ; TONGA-NEXT:    s_waitcnt vmcnt(0)
-; TONGA-NEXT:    v_ashrrev_i32_e32 v6, 31, v1
-; TONGA-NEXT:    v_lshrrev_b32_e32 v6, 30, v6
-; TONGA-NEXT:    v_ashrrev_i32_e32 v7, 31, v3
-; TONGA-NEXT:    v_add_u32_e32 v6, vcc, v0, v6
-; TONGA-NEXT:    v_lshrrev_b32_e32 v7, 30, v7
-; TONGA-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
-; TONGA-NEXT:    v_add_u32_e32 v7, vcc, v2, v7
-; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
-; TONGA-NEXT:    v_and_b32_e32 v6, -4, v6
-; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, v0, v6
-; TONGA-NEXT:    v_and_b32_e32 v7, -4, v7
-; TONGA-NEXT:    v_subb_u32_e32 v1, vcc, v1, v8, vcc
-; TONGA-NEXT:    v_sub_u32_e32 v2, vcc, v2, v7
-; TONGA-NEXT:    v_subb_u32_e32 v3, vcc, v3, v9, vcc
+; TONGA-NEXT:    v_readfirstlane_b32 s1, v1
+; TONGA-NEXT:    s_ashr_i32 s4, s1, 31
+; TONGA-NEXT:    v_readfirstlane_b32 s0, v0
+; TONGA-NEXT:    s_lshr_b32 s4, s4, 30
+; TONGA-NEXT:    s_add_u32 s4, s0, s4
+; TONGA-NEXT:    s_addc_u32 s5, s1, 0
+; TONGA-NEXT:    s_and_b32 s4, s4, -4
+; TONGA-NEXT:    v_readfirstlane_b32 s3, v3
+; TONGA-NEXT:    s_sub_u32 s0, s0, s4
+; TONGA-NEXT:    s_subb_u32 s1, s1, s5
+; TONGA-NEXT:    s_ashr_i32 s4, s3, 31
+; TONGA-NEXT:    v_readfirstlane_b32 s2, v2
+; TONGA-NEXT:    v_mov_b32_e32 v0, s0
+; TONGA-NEXT:    s_lshr_b32 s0, s4, 30
+; TONGA-NEXT:    s_add_u32 s0, s2, s0
+; TONGA-NEXT:    v_mov_b32_e32 v1, s1
+; TONGA-NEXT:    s_addc_u32 s1, s3, 0
+; TONGA-NEXT:    s_and_b32 s0, s0, -4
+; TONGA-NEXT:    s_sub_u32 s0, s2, s0
+; TONGA-NEXT:    s_subb_u32 s1, s3, s1
+; TONGA-NEXT:    v_mov_b32_e32 v2, s0
+; TONGA-NEXT:    v_mov_b32_e32 v3, s1
 ; TONGA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; TONGA-NEXT:    s_endpgm
 ;
@@ -8916,38 +8940,54 @@ define amdgpu_kernel void @srem_v4i64_4(ptr addrspace(1) %out, ptr addrspace(1)
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GCN-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    global_load_dwordx4 v[0:3], v8, s[2:3]
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v8, s[2:3] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v8, s[2:3] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[4:7], v8, s[2:3]
 ; GCN-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NEXT:    v_ashrrev_i32_e32 v9, 31, v1
-; GCN-NEXT:    v_lshrrev_b32_e32 v9, 30, v9
-; GCN-NEXT:    v_ashrrev_i32_e32 v10, 31, v3
-; GCN-NEXT:    v_add_co_u32_e32 v9, vcc, v0, v9
-; GCN-NEXT:    v_lshrrev_b32_e32 v10, 30, v10
-; GCN-NEXT:    v_addc_co_u32_e32 v13, vcc, 0, v1, vcc
+; GCN-NEXT:    v_readfirstlane_b32 s2, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_ashrrev_i32_e32 v11, 31, v5
-; GCN-NEXT:    v_add_co_u32_e32 v10, vcc, v2, v10
-; GCN-NEXT:    v_lshrrev_b32_e32 v11, 30, v11
-; GCN-NEXT:    v_addc_co_u32_e32 v14, vcc, 0, v3, vcc
-; GCN-NEXT:    v_ashrrev_i32_e32 v12, 31, v7
-; GCN-NEXT:    v_add_co_u32_e32 v11, vcc, v4, v11
-; GCN-NEXT:    v_lshrrev_b32_e32 v12, 30, v12
-; GCN-NEXT:    v_addc_co_u32_e32 v15, vcc, 0, v5, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v12, vcc, v6, v12
-; GCN-NEXT:    v_addc_co_u32_e32 v16, vcc, 0, v7, vcc
-; GCN-NEXT:    v_and_b32_e32 v9, -4, v9
-; GCN-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v9
-; GCN-NEXT:    v_and_b32_e32 v10, -4, v10
-; GCN-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v13, vcc
-; GCN-NEXT:    v_sub_co_u32_e32 v2, vcc, v2, v10
-; GCN-NEXT:    v_and_b32_e32 v11, -4, v11
-; GCN-NEXT:    v_subb_co_u32_e32 v3, vcc, v3, v14, vcc
-; GCN-NEXT:    v_sub_co_u32_e32 v4, vcc, v4, v11
-; GCN-NEXT:    v_and_b32_e32 v12, -4, v12
-; GCN-NEXT:    v_subb_co_u32_e32 v5, vcc, v5, v15, vcc
-; GCN-NEXT:    v_sub_co_u32_e32 v6, vcc, v6, v12
-; GCN-NEXT:    v_subb_co_u32_e32 v7, vcc, v7, v16, vcc
+; GCN-NEXT:    v_readfirstlane_b32 s7, v5
+; GCN-NEXT:    s_ashr_i32 s10, s7, 31
+; GCN-NEXT:    v_readfirstlane_b32 s6, v4
+; GCN-NEXT:    s_lshr_b32 s10, s10, 30
+; GCN-NEXT:    s_add_u32 s10, s6, s10
+; GCN-NEXT:    s_addc_u32 s11, s7, 0
+; GCN-NEXT:    s_and_b32 s10, s10, -4
+; GCN-NEXT:    v_readfirstlane_b32 s9, v7
+; GCN-NEXT:    s_sub_u32 s6, s6, s10
+; GCN-NEXT:    s_subb_u32 s7, s7, s11
+; GCN-NEXT:    s_ashr_i32 s10, s9, 31
+; GCN-NEXT:    v_readfirstlane_b32 s8, v6
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    s_lshr_b32 s6, s10, 30
+; GCN-NEXT:    s_add_u32 s6, s8, s6
+; GCN-NEXT:    v_readfirstlane_b32 s3, v1
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NEXT:    s_addc_u32 s7, s9, 0
+; GCN-NEXT:    s_and_b32 s6, s6, -4
+; GCN-NEXT:    s_sub_u32 s6, s8, s6
+; GCN-NEXT:    s_subb_u32 s7, s9, s7
+; GCN-NEXT:    s_ashr_i32 s8, s3, 31
+; GCN-NEXT:    v_readfirstlane_b32 s4, v2
+; GCN-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-NEXT:    s_lshr_b32 s6, s8, 30
+; GCN-NEXT:    s_add_u32 s6, s2, s6
+; GCN-NEXT:    v_readfirstlane_b32 s5, v3
+; GCN-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-NEXT:    s_addc_u32 s7, s3, 0
+; GCN-NEXT:    s_and_b32 s6, s6, -4
+; GCN-NEXT:    s_sub_u32 s2, s2, s6
+; GCN-NEXT:    s_subb_u32 s3, s3, s7
+; GCN-NEXT:    s_ashr_i32 s6, s5, 31
+; GCN-NEXT:    v_mov_b32_e32 v4, s2
+; GCN-NEXT:    s_lshr_b32 s2, s6, 30
+; GCN-NEXT:    s_add_u32 s2, s4, s2
+; GCN-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-NEXT:    s_addc_u32 s3, s5, 0
+; GCN-NEXT:    s_and_b32 s2, s2, -4
+; GCN-NEXT:    s_sub_u32 s2, s4, s2
+; GCN-NEXT:    s_subb_u32 s3, s5, s3
+; GCN-NEXT:    v_mov_b32_e32 v6, s2
+; GCN-NEXT:    v_mov_b32_e32 v7, s3
 ; GCN-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1] offset:16
 ; GCN-NEXT:    global_store_dwordx4 v8, v[0:3], s[0:1]
 ; GCN-NEXT:    s_endpgm
@@ -8962,40 +9002,56 @@ define amdgpu_kernel void @srem_v4i64_4(ptr addrspace(1) %out, ptr addrspace(1)
 ; TAHITI-NEXT:    s_waitcnt lgkmcnt(0)
 ; TAHITI-NEXT:    s_mov_b32 s8, s6
 ; TAHITI-NEXT:    s_mov_b32 s9, s7
-; TAHITI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; TAHITI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; TAHITI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:16
+; TAHITI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[8:11], 0
 ; TAHITI-NEXT:    s_mov_b32 s0, s4
 ; TAHITI-NEXT:    s_mov_b32 s1, s5
 ; TAHITI-NEXT:    s_waitcnt vmcnt(1)
-; TAHITI-NEXT:    v_ashrrev_i32_e32 v8, 31, v1
-; TAHITI-NEXT:    v_lshrrev_b32_e32 v8, 30, v8
-; TAHITI-NEXT:    v_ashrrev_i32_e32 v9, 31, v3
-; TAHITI-NEXT:    v_add_i32_e32 v8, vcc, v0, v8
-; TAHITI-NEXT:    v_lshrrev_b32_e32 v9, 30, v9
-; TAHITI-NEXT:    v_addc_u32_e32 v12, vcc, 0, v1, vcc
+; TAHITI-NEXT:    v_readfirstlane_b32 s4, v0
 ; TAHITI-NEXT:    s_waitcnt vmcnt(0)
-; TAHITI-NEXT:    v_ashrrev_i32_e32 v10, 31, v5
-; TAHITI-NEXT:    v_add_i32_e32 v9, vcc, v2, v9
-; TAHITI-NEXT:    v_lshrrev_b32_e32 v10, 30, v10
-; TAHITI-NEXT:    v_addc_u32_e32 v13, vcc, 0, v3, vcc
-; TAHITI-NEXT:    v_ashrrev_i32_e32 v11, 31, v7
-; TAHITI-NEXT:    v_add_i32_e32 v10, vcc, v4, v10
-; TAHITI-NEXT:    v_lshrrev_b32_e32 v11, 30, v11
-; TAHITI-NEXT:    v_addc_u32_e32 v14, vcc, 0, v5, vcc
-; TAHITI-NEXT:    v_add_i32_e32 v11, vcc, v6, v11
-; TAHITI-NEXT:    v_addc_u32_e32 v15, vcc, 0, v7, vcc
-; TAHITI-NEXT:    v_and_b32_e32 v8, -4, v8
-; TAHITI-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
-; TAHITI-NEXT:    v_and_b32_e32 v9, -4, v9
-; TAHITI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
-; TAHITI-NEXT:    v_sub_i32_e32 v2, vcc, v2, v9
-; TAHITI-NEXT:    v_and_b32_e32 v10, -4, v10
-; TAHITI-NEXT:    v_subb_u32_e32 v3, vcc, v3, v13, vcc
-; TAHITI-NEXT:    v_sub_i32_e32 v4, vcc, v4, v10
-; TAHITI-NEXT:    v_and_b32_e32 v11, -4, v11
-; TAHITI-NEXT:    v_subb_u32_e32 v5, vcc, v5, v14, vcc
-; TAHITI-NEXT:    v_sub_i32_e32 v6, vcc, v6, v11
-; TAHITI-NEXT:    v_subb_u32_e32 v7, vcc, v7, v15, vcc
+; TAHITI-NEXT:    v_readfirstlane_b32 s9, v5
+; TAHITI-NEXT:    s_ashr_i32 s12, s9, 31
+; TAHITI-NEXT:    v_readfirstlane_b32 s8, v4
+; TAHITI-NEXT:    s_lshr_b32 s12, s12, 30
+; TAHITI-NEXT:    s_add_u32 s12, s8, s12
+; TAHITI-NEXT:    s_addc_u32 s13, s9, 0
+; TAHITI-NEXT:    s_and_b32 s12, s12, -4
+; TAHITI-NEXT:    v_readfirstlane_b32 s11, v7
+; TAHITI-NEXT:    s_sub_u32 s8, s8, s12
+; TAHITI-NEXT:    s_subb_u32 s9, s9, s13
+; TAHITI-NEXT:    s_ashr_i32 s12, s11, 31
+; TAHITI-NEXT:    v_readfirstlane_b32 s10, v6
+; TAHITI-NEXT:    v_mov_b32_e32 v0, s8
+; TAHITI-NEXT:    s_lshr_b32 s8, s12, 30
+; TAHITI-NEXT:    s_add_u32 s8, s10, s8
+; TAHITI-NEXT:    v_readfirstlane_b32 s5, v1
+; TAHITI-NEXT:    v_mov_b32_e32 v1, s9
+; TAHITI-NEXT:    s_addc_u32 s9, s11, 0
+; TAHITI-NEXT:    s_and_b32 s8, s8, -4
+; TAHITI-NEXT:    s_sub_u32 s8, s10, s8
+; TAHITI-NEXT:    s_subb_u32 s9, s11, s9
+; TAHITI-NEXT:    s_ashr_i32 s10, s5, 31
+; TAHITI-NEXT:    v_readfirstlane_b32 s6, v2
+; TAHITI-NEXT:    v_mov_b32_e32 v2, s8
+; TAHITI-NEXT:    s_lshr_b32 s8, s10, 30
+; TAHITI-NEXT:    s_add_u32 s8, s4, s8
+; TAHITI-NEXT:    v_readfirstlane_b32 s7, v3
+; TAHITI-NEXT:    v_mov_b32_e32 v3, s9
+; TAHITI-NEXT:    s_addc_u32 s9, s5, 0
+; TAHITI-NEXT:    s_and_b32 s8, s8, -4
+; TAHITI-NEXT:    s_sub_u32 s4, s4, s8
+; TAHITI-NEXT:    s_subb_u32 s5, s5, s9
+; TAHITI-NEXT:    s_ashr_i32 s8, s7, 31
+; TAHITI-NEXT:    v_mov_b32_e32 v4, s4
+; TAHITI-NEXT:    s_lshr_b32 s4, s8, 30
+; TAHITI-NEXT:    s_add_u32 s4, s6, s4
+; TAHITI-NEXT:    v_mov_b32_e32 v5, s5
+; TAHITI-NEXT:    s_addc_u32 s5, s7, 0
+; TAHITI-NEXT:    s_and_b32 s4, s4, -4
+; TAHITI-NEXT:    s_sub_u32 s4, s6, s4
+; TAHITI-NEXT:    s_subb_u32 s5, s7, s5
+; TAHITI-NEXT:    v_mov_b32_e32 v6, s4
+; TAHITI-NEXT:    v_mov_b32_e32 v7, s5
 ; TAHITI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; TAHITI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; TAHITI-NEXT:    s_endpgm
@@ -9004,52 +9060,69 @@ define amdgpu_kernel void @srem_v4i64_4(ptr addrspace(1) %out, ptr addrspace(1)
 ; TONGA:       ; %bb.0:
 ; TONGA-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; TONGA-NEXT:    s_waitcnt lgkmcnt(0)
-; TONGA-NEXT:    v_mov_b32_e32 v0, s2
-; TONGA-NEXT:    v_mov_b32_e32 v1, s3
-; TONGA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
 ; TONGA-NEXT:    s_add_u32 s4, s2, 16
 ; TONGA-NEXT:    s_addc_u32 s5, s3, 0
-; TONGA-NEXT:    v_mov_b32_e32 v4, s4
-; TONGA-NEXT:    v_mov_b32_e32 v5, s5
+; TONGA-NEXT:    v_mov_b32_e32 v0, s4
+; TONGA-NEXT:    v_mov_b32_e32 v5, s3
+; TONGA-NEXT:    v_mov_b32_e32 v1, s5
+; TONGA-NEXT:    v_mov_b32_e32 v4, s2
+; TONGA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
 ; TONGA-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
 ; TONGA-NEXT:    v_mov_b32_e32 v9, s1
 ; TONGA-NEXT:    v_mov_b32_e32 v8, s0
-; TONGA-NEXT:    s_add_u32 s0, s0, 16
-; TONGA-NEXT:    s_addc_u32 s1, s1, 0
-; TONGA-NEXT:    v_mov_b32_e32 v11, s1
-; TONGA-NEXT:    v_mov_b32_e32 v10, s0
 ; TONGA-NEXT:    s_waitcnt vmcnt(1)
-; TONGA-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; TONGA-NEXT:    v_lshrrev_b32_e32 v12, 30, v12
-; TONGA-NEXT:    v_add_u32_e32 v12, vcc, v0, v12
-; TONGA-NEXT:    v_addc_u32_e32 v16, vcc, 0, v1, vcc
-; TONGA-NEXT:    v_and_b32_e32 v12, -4, v12
-; TONGA-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
-; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, v0, v12
-; TONGA-NEXT:    v_lshrrev_b32_e32 v13, 30, v13
-; TONGA-NEXT:    v_subb_u32_e32 v1, vcc, v1, v16, vcc
-; TONGA-NEXT:    v_add_u32_e32 v12, vcc, v2, v13
-; TONGA-NEXT:    v_addc_u32_e32 v13, vcc, 0, v3, vcc
-; TONGA-NEXT:    v_and_b32_e32 v12, -4, v12
+; TONGA-NEXT:    v_readfirstlane_b32 s2, v0
 ; TONGA-NEXT:    s_waitcnt vmcnt(0)
-; TONGA-NEXT:    v_ashrrev_i32_e32 v14, 31, v5
-; TONGA-NEXT:    v_ashrrev_i32_e32 v15, 31, v7
-; TONGA-NEXT:    v_sub_u32_e32 v2, vcc, v2, v12
-; TONGA-NEXT:    v_lshrrev_b32_e32 v14, 30, v14
-; TONGA-NEXT:    v_lshrrev_b32_e32 v15, 30, v15
-; TONGA-NEXT:    v_subb_u32_e32 v3, vcc, v3, v13, vcc
-; TONGA-NEXT:    v_add_u32_e64 v12, s[0:1], v4, v14
-; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v6, v15
-; TONGA-NEXT:    v_addc_u32_e32 v15, vcc, 0, v7, vcc
-; TONGA-NEXT:    v_and_b32_e32 v12, -4, v12
-; TONGA-NEXT:    v_addc_u32_e64 v14, s[0:1], 0, v5, s[0:1]
-; TONGA-NEXT:    v_sub_u32_e32 v4, vcc, v4, v12
-; TONGA-NEXT:    v_and_b32_e32 v13, -4, v13
-; TONGA-NEXT:    v_subb_u32_e32 v5, vcc, v5, v14, vcc
-; TONGA-NEXT:    v_sub_u32_e32 v6, vcc, v6, v13
-; TONGA-NEXT:    v_subb_u32_e32 v7, vcc, v7, v15, vcc
+; TONGA-NEXT:    v_readfirstlane_b32 s7, v5
+; TONGA-NEXT:    s_ashr_i32 s10, s7, 31
+; TONGA-NEXT:    v_readfirstlane_b32 s6, v4
+; TONGA-NEXT:    s_lshr_b32 s10, s10, 30
+; TONGA-NEXT:    s_add_u32 s10, s6, s10
+; TONGA-NEXT:    s_addc_u32 s11, s7, 0
+; TONGA-NEXT:    s_and_b32 s10, s10, -4
+; TONGA-NEXT:    v_readfirstlane_b32 s9, v7
+; TONGA-NEXT:    s_sub_u32 s6, s6, s10
+; TONGA-NEXT:    s_subb_u32 s7, s7, s11
+; TONGA-NEXT:    s_ashr_i32 s10, s9, 31
+; TONGA-NEXT:    v_readfirstlane_b32 s8, v6
+; TONGA-NEXT:    v_mov_b32_e32 v0, s6
+; TONGA-NEXT:    s_lshr_b32 s6, s10, 30
+; TONGA-NEXT:    s_add_u32 s6, s8, s6
+; TONGA-NEXT:    v_readfirstlane_b32 s3, v1
+; TONGA-NEXT:    v_mov_b32_e32 v1, s7
+; TONGA-NEXT:    s_addc_u32 s7, s9, 0
+; TONGA-NEXT:    s_and_b32 s6, s6, -4
+; TONGA-NEXT:    s_sub_u32 s6, s8, s6
+; TONGA-NEXT:    s_subb_u32 s7, s9, s7
+; TONGA-NEXT:    s_ashr_i32 s8, s3, 31
+; TONGA-NEXT:    v_readfirstlane_b32 s4, v2
+; TONGA-NEXT:    v_mov_b32_e32 v2, s6
+; TONGA-NEXT:    s_lshr_b32 s6, s8, 30
+; TONGA-NEXT:    s_add_u32 s6, s2, s6
+; TONGA-NEXT:    v_readfirstlane_b32 s5, v3
+; TONGA-NEXT:    v_mov_b32_e32 v3, s7
+; TONGA-NEXT:    s_addc_u32 s7, s3, 0
+; TONGA-NEXT:    s_and_b32 s6, s6, -4
+; TONGA-NEXT:    s_sub_u32 s2, s2, s6
+; TONGA-NEXT:    s_subb_u32 s3, s3, s7
+; TONGA-NEXT:    s_ashr_i32 s6, s5, 31
 ; TONGA-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
-; TONGA-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
+; TONGA-NEXT:    s_nop 0
+; TONGA-NEXT:    v_mov_b32_e32 v0, s2
+; TONGA-NEXT:    s_lshr_b32 s2, s6, 30
+; TONGA-NEXT:    s_add_u32 s2, s4, s2
+; TONGA-NEXT:    v_mov_b32_e32 v1, s3
+; TONGA-NEXT:    s_addc_u32 s3, s5, 0
+; TONGA-NEXT:    s_and_b32 s2, s2, -4
+; TONGA-NEXT:    s_sub_u32 s2, s4, s2
+; TONGA-NEXT:    s_subb_u32 s3, s5, s3
+; TONGA-NEXT:    s_add_u32 s0, s0, 16
+; TONGA-NEXT:    s_addc_u32 s1, s1, 0
+; TONGA-NEXT:    v_mov_b32_e32 v5, s1
+; TONGA-NEXT:    v_mov_b32_e32 v2, s2
+; TONGA-NEXT:    v_mov_b32_e32 v3, s3
+; TONGA-NEXT:    v_mov_b32_e32 v4, s0
+; TONGA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; TONGA-NEXT:    s_endpgm
 ;
 ; EG-LABEL: srem_v4i64_4:
diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll
index b89885bc32dba..0d36b59822032 100644
--- a/llvm/test/CodeGen/RISCV/abds.ll
+++ b/llvm/test/CodeGen/RISCV/abds.ll
@@ -2013,48 +2013,48 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lw a3, 0(a2)
 ; RV32I-NEXT:    lw a4, 4(a2)
-; RV32I-NEXT:    lw a5, 8(a2)
-; RV32I-NEXT:    lw a2, 12(a2)
-; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 12(a2)
+; RV32I-NEXT:    lw a2, 8(a1)
 ; RV32I-NEXT:    lw t0, 12(a1)
-; RV32I-NEXT:    lw a6, 0(a1)
+; RV32I-NEXT:    lw a5, 0(a1)
 ; RV32I-NEXT:    lw a1, 4(a1)
-; RV32I-NEXT:    sltu t1, a7, a5
-; RV32I-NEXT:    sub t0, t0, a2
-; RV32I-NEXT:    sltu a2, a6, a3
-; RV32I-NEXT:    sub t0, t0, t1
-; RV32I-NEXT:    mv t1, a2
+; RV32I-NEXT:    sltu t1, a2, a6
+; RV32I-NEXT:    sub a7, t0, a7
+; RV32I-NEXT:    sltu t0, a5, a3
+; RV32I-NEXT:    sub a7, a7, t1
+; RV32I-NEXT:    mv t1, t0
 ; RV32I-NEXT:    beq a1, a4, .LBB31_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu t1, a1, a4
 ; RV32I-NEXT:  .LBB31_2:
-; RV32I-NEXT:    sub a5, a7, a5
+; RV32I-NEXT:    sub a6, a2, a6
 ; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sltu a4, a5, t1
-; RV32I-NEXT:    sub a5, a5, t1
-; RV32I-NEXT:    sub a4, t0, a4
-; RV32I-NEXT:    sub a2, a1, a2
-; RV32I-NEXT:    sub a1, a6, a3
-; RV32I-NEXT:    bgez a4, .LBB31_4
+; RV32I-NEXT:    sltu a2, a6, t1
+; RV32I-NEXT:    sub a1, a1, t0
+; RV32I-NEXT:    sub a2, a7, a2
+; RV32I-NEXT:    sub a4, a6, t1
+; RV32I-NEXT:    sub a3, a5, a3
+; RV32I-NEXT:    bgez a2, .LBB31_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    snez a3, a2
-; RV32I-NEXT:    snez a6, a1
-; RV32I-NEXT:    neg a7, a5
-; RV32I-NEXT:    snez a5, a5
-; RV32I-NEXT:    or a3, a6, a3
-; RV32I-NEXT:    add a4, a4, a5
-; RV32I-NEXT:    add a2, a2, a6
-; RV32I-NEXT:    sltu a6, a7, a3
-; RV32I-NEXT:    neg a4, a4
-; RV32I-NEXT:    sub a5, a7, a3
+; RV32I-NEXT:    neg a5, a4
+; RV32I-NEXT:    or a6, a3, a1
+; RV32I-NEXT:    snez a4, a4
+; RV32I-NEXT:    snez a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a4
+; RV32I-NEXT:    add a1, a1, a7
+; RV32I-NEXT:    sltu a7, a5, a6
 ; RV32I-NEXT:    neg a2, a2
-; RV32I-NEXT:    sub a4, a4, a6
+; RV32I-NEXT:    sub a4, a5, a6
 ; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    neg a3, a3
 ; RV32I-NEXT:  .LBB31_4:
-; RV32I-NEXT:    sw a1, 0(a0)
-; RV32I-NEXT:    sw a2, 4(a0)
-; RV32I-NEXT:    sw a5, 8(a0)
-; RV32I-NEXT:    sw a4, 12(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a4, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_subnsw_i128:
@@ -2076,48 +2076,48 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
 ; RV32ZBB:       # %bb.0:
 ; RV32ZBB-NEXT:    lw a3, 0(a2)
 ; RV32ZBB-NEXT:    lw a4, 4(a2)
-; RV32ZBB-NEXT:    lw a5, 8(a2)
-; RV32ZBB-NEXT:    lw a2, 12(a2)
-; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 12(a2)
+; RV32ZBB-NEXT:    lw a2, 8(a1)
 ; RV32ZBB-NEXT:    lw t0, 12(a1)
-; RV32ZBB-NEXT:    lw a6, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 0(a1)
 ; RV32ZBB-NEXT:    lw a1, 4(a1)
-; RV32ZBB-NEXT:    sltu t1, a7, a5
-; RV32ZBB-NEXT:    sub t0, t0, a2
-; RV32ZBB-NEXT:    sltu a2, a6, a3
-; RV32ZBB-NEXT:    sub t0, t0, t1
-; RV32ZBB-NEXT:    mv t1, a2
+; RV32ZBB-NEXT:    sltu t1, a2, a6
+; RV32ZBB-NEXT:    sub a7, t0, a7
+; RV32ZBB-NEXT:    sltu t0, a5, a3
+; RV32ZBB-NEXT:    sub a7, a7, t1
+; RV32ZBB-NEXT:    mv t1, t0
 ; RV32ZBB-NEXT:    beq a1, a4, .LBB31_2
 ; RV32ZBB-NEXT:  # %bb.1:
 ; RV32ZBB-NEXT:    sltu t1, a1, a4
 ; RV32ZBB-NEXT:  .LBB31_2:
-; RV32ZBB-NEXT:    sub a5, a7, a5
+; RV32ZBB-NEXT:    sub a6, a2, a6
 ; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    sltu a4, a5, t1
-; RV32ZBB-NEXT:    sub a5, a5, t1
-; RV32ZBB-NEXT:    sub a4, t0, a4
-; RV32ZBB-NEXT:    sub a2, a1, a2
-; RV32ZBB-NEXT:    sub a1, a6, a3
-; RV32ZBB-NEXT:    bgez a4, .LBB31_4
+; RV32ZBB-NEXT:    sltu a2, a6, t1
+; RV32ZBB-NEXT:    sub a1, a1, t0
+; RV32ZBB-NEXT:    sub a2, a7, a2
+; RV32ZBB-NEXT:    sub a4, a6, t1
+; RV32ZBB-NEXT:    sub a3, a5, a3
+; RV32ZBB-NEXT:    bgez a2, .LBB31_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    snez a3, a2
-; RV32ZBB-NEXT:    snez a6, a1
-; RV32ZBB-NEXT:    neg a7, a5
-; RV32ZBB-NEXT:    snez a5, a5
-; RV32ZBB-NEXT:    or a3, a6, a3
-; RV32ZBB-NEXT:    add a4, a4, a5
-; RV32ZBB-NEXT:    add a2, a2, a6
-; RV32ZBB-NEXT:    sltu a6, a7, a3
-; RV32ZBB-NEXT:    neg a4, a4
-; RV32ZBB-NEXT:    sub a5, a7, a3
+; RV32ZBB-NEXT:    neg a5, a4
+; RV32ZBB-NEXT:    or a6, a3, a1
+; RV32ZBB-NEXT:    snez a4, a4
+; RV32ZBB-NEXT:    snez a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a4
+; RV32ZBB-NEXT:    add a1, a1, a7
+; RV32ZBB-NEXT:    sltu a7, a5, a6
 ; RV32ZBB-NEXT:    neg a2, a2
-; RV32ZBB-NEXT:    sub a4, a4, a6
+; RV32ZBB-NEXT:    sub a4, a5, a6
 ; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    neg a3, a3
 ; RV32ZBB-NEXT:  .LBB31_4:
-; RV32ZBB-NEXT:    sw a1, 0(a0)
-; RV32ZBB-NEXT:    sw a2, 4(a0)
-; RV32ZBB-NEXT:    sw a5, 8(a0)
-; RV32ZBB-NEXT:    sw a4, 12(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 8(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_subnsw_i128:
@@ -2144,48 +2144,48 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lw a3, 0(a2)
 ; RV32I-NEXT:    lw a4, 4(a2)
-; RV32I-NEXT:    lw a5, 8(a2)
-; RV32I-NEXT:    lw a2, 12(a2)
-; RV32I-NEXT:    lw a7, 8(a1)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw a7, 12(a2)
+; RV32I-NEXT:    lw a2, 8(a1)
 ; RV32I-NEXT:    lw t0, 12(a1)
-; RV32I-NEXT:    lw a6, 0(a1)
+; RV32I-NEXT:    lw a5, 0(a1)
 ; RV32I-NEXT:    lw a1, 4(a1)
-; RV32I-NEXT:    sltu t1, a7, a5
-; RV32I-NEXT:    sub t0, t0, a2
-; RV32I-NEXT:    sltu a2, a6, a3
-; RV32I-NEXT:    sub t0, t0, t1
-; RV32I-NEXT:    mv t1, a2
+; RV32I-NEXT:    sltu t1, a2, a6
+; RV32I-NEXT:    sub a7, t0, a7
+; RV32I-NEXT:    sltu t0, a5, a3
+; RV32I-NEXT:    sub a7, a7, t1
+; RV32I-NEXT:    mv t1, t0
 ; RV32I-NEXT:    beq a1, a4, .LBB32_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu t1, a1, a4
 ; RV32I-NEXT:  .LBB32_2:
-; RV32I-NEXT:    sub a5, a7, a5
+; RV32I-NEXT:    sub a6, a2, a6
 ; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sltu a4, a5, t1
-; RV32I-NEXT:    sub a5, a5, t1
-; RV32I-NEXT:    sub a4, t0, a4
-; RV32I-NEXT:    sub a2, a1, a2
-; RV32I-NEXT:    sub a1, a6, a3
-; RV32I-NEXT:    bgez a4, .LBB32_4
+; RV32I-NEXT:    sltu a2, a6, t1
+; RV32I-NEXT:    sub a1, a1, t0
+; RV32I-NEXT:    sub a2, a7, a2
+; RV32I-NEXT:    sub a4, a6, t1
+; RV32I-NEXT:    sub a3, a5, a3
+; RV32I-NEXT:    bgez a2, .LBB32_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    snez a3, a2
-; RV32I-NEXT:    snez a6, a1
-; RV32I-NEXT:    neg a7, a5
-; RV32I-NEXT:    snez a5, a5
-; RV32I-NEXT:    or a3, a6, a3
-; RV32I-NEXT:    add a4, a4, a5
-; RV32I-NEXT:    add a2, a2, a6
-; RV32I-NEXT:    sltu a6, a7, a3
-; RV32I-NEXT:    neg a4, a4
-; RV32I-NEXT:    sub a5, a7, a3
+; RV32I-NEXT:    neg a5, a4
+; RV32I-NEXT:    or a6, a3, a1
+; RV32I-NEXT:    snez a4, a4
+; RV32I-NEXT:    snez a7, a3
+; RV32I-NEXT:    snez a6, a6
+; RV32I-NEXT:    add a2, a2, a4
+; RV32I-NEXT:    add a1, a1, a7
+; RV32I-NEXT:    sltu a7, a5, a6
 ; RV32I-NEXT:    neg a2, a2
-; RV32I-NEXT:    sub a4, a4, a6
+; RV32I-NEXT:    sub a4, a5, a6
 ; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    sub a2, a2, a7
+; RV32I-NEXT:    neg a3, a3
 ; RV32I-NEXT:  .LBB32_4:
-; RV32I-NEXT:    sw a1, 0(a0)
-; RV32I-NEXT:    sw a2, 4(a0)
-; RV32I-NEXT:    sw a5, 8(a0)
-; RV32I-NEXT:    sw a4, 12(a0)
+; RV32I-NEXT:    sw a3, 0(a0)
+; RV32I-NEXT:    sw a1, 4(a0)
+; RV32I-NEXT:    sw a4, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: abd_subnsw_i128_undef:
@@ -2207,48 +2207,48 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
 ; RV32ZBB:       # %bb.0:
 ; RV32ZBB-NEXT:    lw a3, 0(a2)
 ; RV32ZBB-NEXT:    lw a4, 4(a2)
-; RV32ZBB-NEXT:    lw a5, 8(a2)
-; RV32ZBB-NEXT:    lw a2, 12(a2)
-; RV32ZBB-NEXT:    lw a7, 8(a1)
+; RV32ZBB-NEXT:    lw a6, 8(a2)
+; RV32ZBB-NEXT:    lw a7, 12(a2)
+; RV32ZBB-NEXT:    lw a2, 8(a1)
 ; RV32ZBB-NEXT:    lw t0, 12(a1)
-; RV32ZBB-NEXT:    lw a6, 0(a1)
+; RV32ZBB-NEXT:    lw a5, 0(a1)
 ; RV32ZBB-NEXT:    lw a1, 4(a1)
-; RV32ZBB-NEXT:    sltu t1, a7, a5
-; RV32ZBB-NEXT:    sub t0, t0, a2
-; RV32ZBB-NEXT:    sltu a2, a6, a3
-; RV32ZBB-NEXT:    sub t0, t0, t1
-; RV32ZBB-NEXT:    mv t1, a2
+; RV32ZBB-NEXT:    sltu t1, a2, a6
+; RV32ZBB-NEXT:    sub a7, t0, a7
+; RV32ZBB-NEXT:    sltu t0, a5, a3
+; RV32ZBB-NEXT:    sub a7, a7, t1
+; RV32ZBB-NEXT:    mv t1, t0
 ; RV32ZBB-NEXT:    beq a1, a4, .LBB32_2
 ; RV32ZBB-NEXT:  # %bb.1:
 ; RV32ZBB-NEXT:    sltu t1, a1, a4
 ; RV32ZBB-NEXT:  .LBB32_2:
-; RV32ZBB-NEXT:    sub a5, a7, a5
+; RV32ZBB-NEXT:    sub a6, a2, a6
 ; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    sltu a4, a5, t1
-; RV32ZBB-NEXT:    sub a5, a5, t1
-; RV32ZBB-NEXT:    sub a4, t0, a4
-; RV32ZBB-NEXT:    sub a2, a1, a2
-; RV32ZBB-NEXT:    sub a1, a6, a3
-; RV32ZBB-NEXT:    bgez a4, .LBB32_4
+; RV32ZBB-NEXT:    sltu a2, a6, t1
+; RV32ZBB-NEXT:    sub a1, a1, t0
+; RV32ZBB-NEXT:    sub a2, a7, a2
+; RV32ZBB-NEXT:    sub a4, a6, t1
+; RV32ZBB-NEXT:    sub a3, a5, a3
+; RV32ZBB-NEXT:    bgez a2, .LBB32_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    snez a3, a2
-; RV32ZBB-NEXT:    snez a6, a1
-; RV32ZBB-NEXT:    neg a7, a5
-; RV32ZBB-NEXT:    snez a5, a5
-; RV32ZBB-NEXT:    or a3, a6, a3
-; RV32ZBB-NEXT:    add a4, a4, a5
-; RV32ZBB-NEXT:    add a2, a2, a6
-; RV32ZBB-NEXT:    sltu a6, a7, a3
-; RV32ZBB-NEXT:    neg a4, a4
-; RV32ZBB-NEXT:    sub a5, a7, a3
+; RV32ZBB-NEXT:    neg a5, a4
+; RV32ZBB-NEXT:    or a6, a3, a1
+; RV32ZBB-NEXT:    snez a4, a4
+; RV32ZBB-NEXT:    snez a7, a3
+; RV32ZBB-NEXT:    snez a6, a6
+; RV32ZBB-NEXT:    add a2, a2, a4
+; RV32ZBB-NEXT:    add a1, a1, a7
+; RV32ZBB-NEXT:    sltu a7, a5, a6
 ; RV32ZBB-NEXT:    neg a2, a2
-; RV32ZBB-NEXT:    sub a4, a4, a6
+; RV32ZBB-NEXT:    sub a4, a5, a6
 ; RV32ZBB-NEXT:    neg a1, a1
+; RV32ZBB-NEXT:    sub a2, a2, a7
+; RV32ZBB-NEXT:    neg a3, a3
 ; RV32ZBB-NEXT:  .LBB32_4:
-; RV32ZBB-NEXT:    sw a1, 0(a0)
-; RV32ZBB-NEXT:    sw a2, 4(a0)
-; RV32ZBB-NEXT:    sw a5, 8(a0)
-; RV32ZBB-NEXT:    sw a4, 12(a0)
+; RV32ZBB-NEXT:    sw a3, 0(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a4, 8(a0)
+; RV32ZBB-NEXT:    sw a2, 12(a0)
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: abd_subnsw_i128_undef:
diff --git a/llvm/test/CodeGen/RISCV/abdu-neg.ll b/llvm/test/CodeGen/RISCV/abdu-neg.ll
index c865d8b16e0eb..373b195be3820 100644
--- a/llvm/test/CodeGen/RISCV/abdu-neg.ll
+++ b/llvm/test/CodeGen/RISCV/abdu-neg.ll
@@ -624,87 +624,88 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
 define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: abd_ext_i128:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lw a5, 0(a2)
-; RV32I-NEXT:    lw a7, 4(a2)
-; RV32I-NEXT:    lw a3, 8(a2)
-; RV32I-NEXT:    lw t1, 12(a2)
-; RV32I-NEXT:    lw a4, 8(a1)
-; RV32I-NEXT:    lw a6, 12(a1)
-; RV32I-NEXT:    lw a2, 0(a1)
+; RV32I-NEXT:    lw a4, 0(a2)
+; RV32I-NEXT:    lw a6, 4(a2)
+; RV32I-NEXT:    lw t1, 8(a2)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw a3, 8(a1)
+; RV32I-NEXT:    lw a5, 12(a1)
+; RV32I-NEXT:    lw a7, 0(a1)
 ; RV32I-NEXT:    lw t0, 4(a1)
-; RV32I-NEXT:    sltu a1, a4, a3
-; RV32I-NEXT:    sub t1, a6, t1
-; RV32I-NEXT:    sltu t2, a2, a5
-; RV32I-NEXT:    sub a1, t1, a1
-; RV32I-NEXT:    mv t1, t2
-; RV32I-NEXT:    beq t0, a7, .LBB11_2
+; RV32I-NEXT:    sltu a1, a3, t1
+; RV32I-NEXT:    sub a2, a5, a2
+; RV32I-NEXT:    sltu t2, a7, a4
+; RV32I-NEXT:    sub a1, a2, a1
+; RV32I-NEXT:    mv a2, t2
+; RV32I-NEXT:    beq t0, a6, .LBB11_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu t1, t0, a7
+; RV32I-NEXT:    sltu a2, t0, a6
 ; RV32I-NEXT:  .LBB11_2:
-; RV32I-NEXT:    sub a3, a4, a3
-; RV32I-NEXT:    sltu t3, a3, t1
+; RV32I-NEXT:    sub t1, a3, t1
+; RV32I-NEXT:    sltu t3, t1, a2
 ; RV32I-NEXT:    sub a1, a1, t3
-; RV32I-NEXT:    sub a3, a3, t1
-; RV32I-NEXT:    beq a1, a6, .LBB11_4
+; RV32I-NEXT:    sub a2, t1, a2
+; RV32I-NEXT:    beq a1, a5, .LBB11_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    sltu t1, a6, a1
+; RV32I-NEXT:    sltu t1, a5, a1
 ; RV32I-NEXT:    j .LBB11_5
 ; RV32I-NEXT:  .LBB11_4:
-; RV32I-NEXT:    sltu t1, a4, a3
+; RV32I-NEXT:    sltu t1, a3, a2
 ; RV32I-NEXT:  .LBB11_5:
-; RV32I-NEXT:    sub a7, t0, a7
-; RV32I-NEXT:    sub a7, a7, t2
-; RV32I-NEXT:    sub a5, a2, a5
-; RV32I-NEXT:    beq a7, t0, .LBB11_7
+; RV32I-NEXT:    sub a6, t0, a6
+; RV32I-NEXT:    sub a6, a6, t2
+; RV32I-NEXT:    sub t2, a7, a4
+; RV32I-NEXT:    beq a6, t0, .LBB11_7
 ; RV32I-NEXT:  # %bb.6:
-; RV32I-NEXT:    sltu a2, t0, a7
+; RV32I-NEXT:    sltu a4, t0, a6
 ; RV32I-NEXT:    j .LBB11_8
 ; RV32I-NEXT:  .LBB11_7:
-; RV32I-NEXT:    sltu a2, a2, a5
+; RV32I-NEXT:    sltu a4, a7, t2
 ; RV32I-NEXT:  .LBB11_8:
-; RV32I-NEXT:    xor a6, a1, a6
-; RV32I-NEXT:    xor a4, a3, a4
-; RV32I-NEXT:    or a4, a4, a6
-; RV32I-NEXT:    beqz a4, .LBB11_10
+; RV32I-NEXT:    xor a5, a1, a5
+; RV32I-NEXT:    xor a3, a2, a3
+; RV32I-NEXT:    or a3, a3, a5
+; RV32I-NEXT:    beqz a3, .LBB11_10
 ; RV32I-NEXT:  # %bb.9:
-; RV32I-NEXT:    mv a2, t1
+; RV32I-NEXT:    mv a4, t1
 ; RV32I-NEXT:  .LBB11_10:
-; RV32I-NEXT:    neg a4, a2
-; RV32I-NEXT:    xor t0, a5, a4
-; RV32I-NEXT:    xor t3, a7, a4
-; RV32I-NEXT:    sltu a5, t0, a4
-; RV32I-NEXT:    add a6, t3, a2
-; RV32I-NEXT:    add t0, t0, a2
-; RV32I-NEXT:    sub t1, a6, a5
-; RV32I-NEXT:    snez a6, t1
-; RV32I-NEXT:    snez t2, t0
-; RV32I-NEXT:    or a6, t2, a6
-; RV32I-NEXT:    beqz a7, .LBB11_12
+; RV32I-NEXT:    neg a5, a4
+; RV32I-NEXT:    xor t0, t2, a5
+; RV32I-NEXT:    sltu a7, t0, a5
+; RV32I-NEXT:    xor t1, a6, a5
+; RV32I-NEXT:    mv a3, a7
+; RV32I-NEXT:    beqz a6, .LBB11_12
 ; RV32I-NEXT:  # %bb.11:
-; RV32I-NEXT:    sltu a5, t3, a4
+; RV32I-NEXT:    sltu a3, t1, a5
 ; RV32I-NEXT:  .LBB11_12:
-; RV32I-NEXT:    xor a3, a3, a4
-; RV32I-NEXT:    xor a1, a1, a4
-; RV32I-NEXT:    add t1, t1, t2
-; RV32I-NEXT:    neg a7, t0
-; RV32I-NEXT:    add t0, a3, a2
-; RV32I-NEXT:    sltu a3, a3, a4
+; RV32I-NEXT:    xor a2, a2, a5
+; RV32I-NEXT:    add t1, t1, a4
+; RV32I-NEXT:    add t0, t0, a4
+; RV32I-NEXT:    xor a1, a1, a5
+; RV32I-NEXT:    add a6, a2, a4
+; RV32I-NEXT:    sub a7, t1, a7
+; RV32I-NEXT:    sltu a2, a2, a5
+; RV32I-NEXT:    add a1, a1, a4
+; RV32I-NEXT:    snez a4, t0
+; RV32I-NEXT:    neg a5, t0
+; RV32I-NEXT:    sub t1, a6, a3
+; RV32I-NEXT:    or t0, t0, a7
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sltu a2, a6, a3
+; RV32I-NEXT:    add a4, a7, a4
+; RV32I-NEXT:    neg a3, t1
+; RV32I-NEXT:    snez a6, t0
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    snez a2, t1
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    sltu a7, a3, a6
 ; RV32I-NEXT:    add a1, a1, a2
-; RV32I-NEXT:    neg a2, t1
-; RV32I-NEXT:    sub a4, t0, a5
-; RV32I-NEXT:    sub a1, a1, a3
-; RV32I-NEXT:    sltu a3, t0, a5
-; RV32I-NEXT:    neg a5, a4
-; RV32I-NEXT:    sub a1, a1, a3
-; RV32I-NEXT:    snez a3, a4
-; RV32I-NEXT:    sltu a4, a5, a6
-; RV32I-NEXT:    add a1, a1, a3
-; RV32I-NEXT:    sub a3, a5, a6
+; RV32I-NEXT:    sub a2, a3, a6
 ; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sw a7, 0(a0)
-; RV32I-NEXT:    sw a2, 4(a0)
-; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sub a1, a1, a7
+; RV32I-NEXT:    sw a5, 0(a0)
+; RV32I-NEXT:    sw a4, 4(a0)
+; RV32I-NEXT:    sw a2, 8(a0)
 ; RV32I-NEXT:    sw a1, 12(a0)
 ; RV32I-NEXT:    ret
 ;
@@ -736,87 +737,88 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
 ;
 ; RV32ZBB-LABEL: abd_ext_i128:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    lw a5, 0(a2)
-; RV32ZBB-NEXT:    lw a7, 4(a2)
-; RV32ZBB-NEXT:    lw a3, 8(a2)
-; RV32ZBB-NEXT:    lw t1, 12(a2)
-; RV32ZBB-NEXT:    lw a4, 8(a1)
-; RV32ZBB-NEXT:    lw a6, 12(a1)
-; RV32ZBB-NEXT:    lw a2, 0(a1)
+; RV32ZBB-NEXT:    lw a4, 0(a2)
+; RV32ZBB-NEXT:    lw a6, 4(a2)
+; RV32ZBB-NEXT:    lw t1, 8(a2)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw a3, 8(a1)
+; RV32ZBB-NEXT:    lw a5, 12(a1)
+; RV32ZBB-NEXT:    lw a7, 0(a1)
 ; RV32ZBB-NEXT:    lw t0, 4(a1)
-; RV32ZBB-NEXT:    sltu a1, a4, a3
-; RV32ZBB-NEXT:    sub t1, a6, t1
-; RV32ZBB-NEXT:    sltu t2, a2, a5
-; RV32ZBB-NEXT:    sub a1, t1, a1
-; RV32ZBB-NEXT:    mv t1, t2
-; RV32ZBB-NEXT:    beq t0, a7, .LBB11_2
+; RV32ZBB-NEXT:    sltu a1, a3, t1
+; RV32ZBB-NEXT:    sub a2, a5, a2
+; RV32ZBB-NEXT:    sltu t2, a7, a4
+; RV32ZBB-NEXT:    sub a1, a2, a1
+; RV32ZBB-NEXT:    mv a2, t2
+; RV32ZBB-NEXT:    beq t0, a6, .LBB11_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu t1, t0, a7
+; RV32ZBB-NEXT:    sltu a2, t0, a6
 ; RV32ZBB-NEXT:  .LBB11_2:
-; RV32ZBB-NEXT:    sub a3, a4, a3
-; RV32ZBB-NEXT:    sltu t3, a3, t1
+; RV32ZBB-NEXT:    sub t1, a3, t1
+; RV32ZBB-NEXT:    sltu t3, t1, a2
 ; RV32ZBB-NEXT:    sub a1, a1, t3
-; RV32ZBB-NEXT:    sub a3, a3, t1
-; RV32ZBB-NEXT:    beq a1, a6, .LBB11_4
+; RV32ZBB-NEXT:    sub a2, t1, a2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB11_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    sltu t1, a6, a1
+; RV32ZBB-NEXT:    sltu t1, a5, a1
 ; RV32ZBB-NEXT:    j .LBB11_5
 ; RV32ZBB-NEXT:  .LBB11_4:
-; RV32ZBB-NEXT:    sltu t1, a4, a3
+; RV32ZBB-NEXT:    sltu t1, a3, a2
 ; RV32ZBB-NEXT:  .LBB11_5:
-; RV32ZBB-NEXT:    sub a7, t0, a7
-; RV32ZBB-NEXT:    sub a7, a7, t2
-; RV32ZBB-NEXT:    sub a5, a2, a5
-; RV32ZBB-NEXT:    beq a7, t0, .LBB11_7
+; RV32ZBB-NEXT:    sub a6, t0, a6
+; RV32ZBB-NEXT:    sub a6, a6, t2
+; RV32ZBB-NEXT:    sub t2, a7, a4
+; RV32ZBB-NEXT:    beq a6, t0, .LBB11_7
 ; RV32ZBB-NEXT:  # %bb.6:
-; RV32ZBB-NEXT:    sltu a2, t0, a7
+; RV32ZBB-NEXT:    sltu a4, t0, a6
 ; RV32ZBB-NEXT:    j .LBB11_8
 ; RV32ZBB-NEXT:  .LBB11_7:
-; RV32ZBB-NEXT:    sltu a2, a2, a5
+; RV32ZBB-NEXT:    sltu a4, a7, t2
 ; RV32ZBB-NEXT:  .LBB11_8:
-; RV32ZBB-NEXT:    xor a6, a1, a6
-; RV32ZBB-NEXT:    xor a4, a3, a4
-; RV32ZBB-NEXT:    or a4, a4, a6
-; RV32ZBB-NEXT:    beqz a4, .LBB11_10
+; RV32ZBB-NEXT:    xor a5, a1, a5
+; RV32ZBB-NEXT:    xor a3, a2, a3
+; RV32ZBB-NEXT:    or a3, a3, a5
+; RV32ZBB-NEXT:    beqz a3, .LBB11_10
 ; RV32ZBB-NEXT:  # %bb.9:
-; RV32ZBB-NEXT:    mv a2, t1
+; RV32ZBB-NEXT:    mv a4, t1
 ; RV32ZBB-NEXT:  .LBB11_10:
-; RV32ZBB-NEXT:    neg a4, a2
-; RV32ZBB-NEXT:    xor t0, a5, a4
-; RV32ZBB-NEXT:    xor t3, a7, a4
-; RV32ZBB-NEXT:    sltu a5, t0, a4
-; RV32ZBB-NEXT:    add a6, t3, a2
-; RV32ZBB-NEXT:    add t0, t0, a2
-; RV32ZBB-NEXT:    sub t1, a6, a5
-; RV32ZBB-NEXT:    snez a6, t1
-; RV32ZBB-NEXT:    snez t2, t0
-; RV32ZBB-NEXT:    or a6, t2, a6
-; RV32ZBB-NEXT:    beqz a7, .LBB11_12
+; RV32ZBB-NEXT:    neg a5, a4
+; RV32ZBB-NEXT:    xor t0, t2, a5
+; RV32ZBB-NEXT:    sltu a7, t0, a5
+; RV32ZBB-NEXT:    xor t1, a6, a5
+; RV32ZBB-NEXT:    mv a3, a7
+; RV32ZBB-NEXT:    beqz a6, .LBB11_12
 ; RV32ZBB-NEXT:  # %bb.11:
-; RV32ZBB-NEXT:    sltu a5, t3, a4
+; RV32ZBB-NEXT:    sltu a3, t1, a5
 ; RV32ZBB-NEXT:  .LBB11_12:
-; RV32ZBB-NEXT:    xor a3, a3, a4
-; RV32ZBB-NEXT:    xor a1, a1, a4
-; RV32ZBB-NEXT:    add t1, t1, t2
-; RV32ZBB-NEXT:    neg a7, t0
-; RV32ZBB-NEXT:    add t0, a3, a2
-; RV32ZBB-NEXT:    sltu a3, a3, a4
+; RV32ZBB-NEXT:    xor a2, a2, a5
+; RV32ZBB-NEXT:    add t1, t1, a4
+; RV32ZBB-NEXT:    add t0, t0, a4
+; RV32ZBB-NEXT:    xor a1, a1, a5
+; RV32ZBB-NEXT:    add a6, a2, a4
+; RV32ZBB-NEXT:    sub a7, t1, a7
+; RV32ZBB-NEXT:    sltu a2, a2, a5
+; RV32ZBB-NEXT:    add a1, a1, a4
+; RV32ZBB-NEXT:    snez a4, t0
+; RV32ZBB-NEXT:    neg a5, t0
+; RV32ZBB-NEXT:    sub t1, a6, a3
+; RV32ZBB-NEXT:    or t0, t0, a7
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sltu a2, a6, a3
+; RV32ZBB-NEXT:    add a4, a7, a4
+; RV32ZBB-NEXT:    neg a3, t1
+; RV32ZBB-NEXT:    snez a6, t0
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    snez a2, t1
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    sltu a7, a3, a6
 ; RV32ZBB-NEXT:    add a1, a1, a2
-; RV32ZBB-NEXT:    neg a2, t1
-; RV32ZBB-NEXT:    sub a4, t0, a5
-; RV32ZBB-NEXT:    sub a1, a1, a3
-; RV32ZBB-NEXT:    sltu a3, t0, a5
-; RV32ZBB-NEXT:    neg a5, a4
-; RV32ZBB-NEXT:    sub a1, a1, a3
-; RV32ZBB-NEXT:    snez a3, a4
-; RV32ZBB-NEXT:    sltu a4, a5, a6
-; RV32ZBB-NEXT:    add a1, a1, a3
-; RV32ZBB-NEXT:    sub a3, a5, a6
+; RV32ZBB-NEXT:    sub a2, a3, a6
 ; RV32ZBB-NEXT:    neg a1, a1
-; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    sw a7, 0(a0)
-; RV32ZBB-NEXT:    sw a2, 4(a0)
-; RV32ZBB-NEXT:    sw a3, 8(a0)
+; RV32ZBB-NEXT:    sub a1, a1, a7
+; RV32ZBB-NEXT:    sw a5, 0(a0)
+; RV32ZBB-NEXT:    sw a4, 4(a0)
+; RV32ZBB-NEXT:    sw a2, 8(a0)
 ; RV32ZBB-NEXT:    sw a1, 12(a0)
 ; RV32ZBB-NEXT:    ret
 ;
@@ -857,87 +859,88 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
 define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: abd_ext_i128_undef:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lw a5, 0(a2)
-; RV32I-NEXT:    lw a7, 4(a2)
-; RV32I-NEXT:    lw a3, 8(a2)
-; RV32I-NEXT:    lw t1, 12(a2)
-; RV32I-NEXT:    lw a4, 8(a1)
-; RV32I-NEXT:    lw a6, 12(a1)
-; RV32I-NEXT:    lw a2, 0(a1)
+; RV32I-NEXT:    lw a4, 0(a2)
+; RV32I-NEXT:    lw a6, 4(a2)
+; RV32I-NEXT:    lw t1, 8(a2)
+; RV32I-NEXT:    lw a2, 12(a2)
+; RV32I-NEXT:    lw a3, 8(a1)
+; RV32I-NEXT:    lw a5, 12(a1)
+; RV32I-NEXT:    lw a7, 0(a1)
 ; RV32I-NEXT:    lw t0, 4(a1)
-; RV32I-NEXT:    sltu a1, a4, a3
-; RV32I-NEXT:    sub t1, a6, t1
-; RV32I-NEXT:    sltu t2, a2, a5
-; RV32I-NEXT:    sub a1, t1, a1
-; RV32I-NEXT:    mv t1, t2
-; RV32I-NEXT:    beq t0, a7, .LBB12_2
+; RV32I-NEXT:    sltu a1, a3, t1
+; RV32I-NEXT:    sub a2, a5, a2
+; RV32I-NEXT:    sltu t2, a7, a4
+; RV32I-NEXT:    sub a1, a2, a1
+; RV32I-NEXT:    mv a2, t2
+; RV32I-NEXT:    beq t0, a6, .LBB12_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu t1, t0, a7
+; RV32I-NEXT:    sltu a2, t0, a6
 ; RV32I-NEXT:  .LBB12_2:
-; RV32I-NEXT:    sub a3, a4, a3
-; RV32I-NEXT:    sltu t3, a3, t1
+; RV32I-NEXT:    sub t1, a3, t1
+; RV32I-NEXT:    sltu t3, t1, a2
 ; RV32I-NEXT:    sub a1, a1, t3
-; RV32I-NEXT:    sub a3, a3, t1
-; RV32I-NEXT:    beq a1, a6, .LBB12_4
+; RV32I-NEXT:    sub a2, t1, a2
+; RV32I-NEXT:    beq a1, a5, .LBB12_4
 ; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    sltu t1, a6, a1
+; RV32I-NEXT:    sltu t1, a5, a1
 ; RV32I-NEXT:    j .LBB12_5
 ; RV32I-NEXT:  .LBB12_4:
-; RV32I-NEXT:    sltu t1, a4, a3
+; RV32I-NEXT:    sltu t1, a3, a2
 ; RV32I-NEXT:  .LBB12_5:
-; RV32I-NEXT:    sub a7, t0, a7
-; RV32I-NEXT:    sub a7, a7, t2
-; RV32I-NEXT:    sub a5, a2, a5
-; RV32I-NEXT:    beq a7, t0, .LBB12_7
+; RV32I-NEXT:    sub a6, t0, a6
+; RV32I-NEXT:    sub a6, a6, t2
+; RV32I-NEXT:    sub t2, a7, a4
+; RV32I-NEXT:    beq a6, t0, .LBB12_7
 ; RV32I-NEXT:  # %bb.6:
-; RV32I-NEXT:    sltu a2, t0, a7
+; RV32I-NEXT:    sltu a4, t0, a6
 ; RV32I-NEXT:    j .LBB12_8
 ; RV32I-NEXT:  .LBB12_7:
-; RV32I-NEXT:    sltu a2, a2, a5
+; RV32I-NEXT:    sltu a4, a7, t2
 ; RV32I-NEXT:  .LBB12_8:
-; RV32I-NEXT:    xor a6, a1, a6
-; RV32I-NEXT:    xor a4, a3, a4
-; RV32I-NEXT:    or a4, a4, a6
-; RV32I-NEXT:    beqz a4, .LBB12_10
+; RV32I-NEXT:    xor a5, a1, a5
+; RV32I-NEXT:    xor a3, a2, a3
+; RV32I-NEXT:    or a3, a3, a5
+; RV32I-NEXT:    beqz a3, .LBB12_10
 ; RV32I-NEXT:  # %bb.9:
-; RV32I-NEXT:    mv a2, t1
+; RV32I-NEXT:    mv a4, t1
 ; RV32I-NEXT:  .LBB12_10:
-; RV32I-NEXT:    neg a4, a2
-; RV32I-NEXT:    xor t0, a5, a4
-; RV32I-NEXT:    xor t3, a7, a4
-; RV32I-NEXT:    sltu a5, t0, a4
-; RV32I-NEXT:    add a6, t3, a2
-; RV32I-NEXT:    add t0, t0, a2
-; RV32I-NEXT:    sub t1, a6, a5
-; RV32I-NEXT:    snez a6, t1
-; RV32I-NEXT:    snez t2, t0
-; RV32I-NEXT:    or a6, t2, a6
-; RV32I-NEXT:    beqz a7, .LBB12_12
+; RV32I-NEXT:    neg a5, a4
+; RV32I-NEXT:    xor t0, t2, a5
+; RV32I-NEXT:    sltu a7, t0, a5
+; RV32I-NEXT:    xor t1, a6, a5
+; RV32I-NEXT:    mv a3, a7
+; RV32I-NEXT:    beqz a6, .LBB12_12
 ; RV32I-NEXT:  # %bb.11:
-; RV32I-NEXT:    sltu a5, t3, a4
+; RV32I-NEXT:    sltu a3, t1, a5
 ; RV32I-NEXT:  .LBB12_12:
-; RV32I-NEXT:    xor a3, a3, a4
-; RV32I-NEXT:    xor a1, a1, a4
-; RV32I-NEXT:    add t1, t1, t2
-; RV32I-NEXT:    neg a7, t0
-; RV32I-NEXT:    add t0, a3, a2
-; RV32I-NEXT:    sltu a3, a3, a4
+; RV32I-NEXT:    xor a2, a2, a5
+; RV32I-NEXT:    add t1, t1, a4
+; RV32I-NEXT:    add t0, t0, a4
+; RV32I-NEXT:    xor a1, a1, a5
+; RV32I-NEXT:    add a6, a2, a4
+; RV32I-NEXT:    sub a7, t1, a7
+; RV32I-NEXT:    sltu a2, a2, a5
+; RV32I-NEXT:    add a1, a1, a4
+; RV32I-NEXT:    snez a4, t0
+; RV32I-NEXT:    neg a5, t0
+; RV32I-NEXT:    sub t1, a6, a3
+; RV32I-NEXT:    or t0, t0, a7
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    sltu a2, a6, a3
+; RV32I-NEXT:    add a4, a7, a4
+; RV32I-NEXT:    neg a3, t1
+; RV32I-NEXT:    snez a6, t0
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    snez a2, t1
+; RV32I-NEXT:    neg a4, a4
+; RV32I-NEXT:    sltu a7, a3, a6
 ; RV32I-NEXT:    add a1, a1, a2
-; RV32I-NEXT:    neg a2, t1
-; RV32I-NEXT:    sub a4, t0, a5
-; RV32I-NEXT:    sub a1, a1, a3
-; RV32I-NEXT:    sltu a3, t0, a5
-; RV32I-NEXT:    neg a5, a4
-; RV32I-NEXT:    sub a1, a1, a3
-; RV32I-NEXT:    snez a3, a4
-; RV32I-NEXT:    sltu a4, a5, a6
-; RV32I-NEXT:    add a1, a1, a3
-; RV32I-NEXT:    sub a3, a5, a6
+; RV32I-NEXT:    sub a2, a3, a6
 ; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sw a7, 0(a0)
-; RV32I-NEXT:    sw a2, 4(a0)
-; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sub a1, a1, a7
+; RV32I-NEXT:    sw a5, 0(a0)
+; RV32I-NEXT:    sw a4, 4(a0)
+; RV32I-NEXT:    sw a2, 8(a0)
 ; RV32I-NEXT:    sw a1, 12(a0)
 ; RV32I-NEXT:    ret
 ;
@@ -969,87 +972,88 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
 ;
 ; RV32ZBB-LABEL: abd_ext_i128_undef:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    lw a5, 0(a2)
-; RV32ZBB-NEXT:    lw a7, 4(a2)
-; RV32ZBB-NEXT:    lw a3, 8(a2)
-; RV32ZBB-NEXT:    lw t1, 12(a2)
-; RV32ZBB-NEXT:    lw a4, 8(a1)
-; RV32ZBB-NEXT:    lw a6, 12(a1)
-; RV32ZBB-NEXT:    lw a2, 0(a1)
+; RV32ZBB-NEXT:    lw a4, 0(a2)
+; RV32ZBB-NEXT:    lw a6, 4(a2)
+; RV32ZBB-NEXT:    lw t1, 8(a2)
+; RV32ZBB-NEXT:    lw a2, 12(a2)
+; RV32ZBB-NEXT:    lw a3, 8(a1)
+; RV32ZBB-NEXT:    lw a5, 12(a1)
+; RV32ZBB-NEXT:    lw a7, 0(a1)
 ; RV32ZBB-NEXT:    lw t0, 4(a1)
-; RV32ZBB-NEXT:    sltu a1, a4, a3
-; RV32ZBB-NEXT:    sub t1, a6, t1
-; RV32ZBB-NEXT:    sltu t2, a2, a5
-; RV32ZBB-NEXT:    sub a1, t1, a1
-; RV32ZBB-NEXT:    mv t1, t2
-; RV32ZBB-NEXT:    beq t0, a7, .LBB12_2
+; RV32ZBB-NEXT:    sltu a1, a3, t1
+; RV32ZBB-NEXT:    sub a2, a5, a2
+; RV32ZBB-NEXT:    sltu t2, a7, a4
+; RV32ZBB-NEXT:    sub a1, a2, a1
+; RV32ZBB-NEXT:    mv a2, t2
+; RV32ZBB-NEXT:    beq t0, a6, .LBB12_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu t1, t0, a7
+; RV32ZBB-NEXT:    sltu a2, t0, a6
 ; RV32ZBB-NEXT:  .LBB12_2:
-; RV32ZBB-NEXT:    sub a3, a4, a3
-; RV32ZBB-NEXT:    sltu t3, a3, t1
+; RV32ZBB-NEXT:    sub t1, a3, t1
+; RV32ZBB-NEXT:    sltu t3, t1, a2
 ; RV32ZBB-NEXT:    sub a1, a1, t3
-; RV32ZBB-NEXT:    sub a3, a3, t1
-; RV32ZBB-NEXT:    beq a1, a6, .LBB12_4
+; RV32ZBB-NEXT:    sub a2, t1, a2
+; RV32ZBB-NEXT:    beq a1, a5, .LBB12_4
 ; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    sltu t1, a6, a1
+; RV32ZBB-NEXT:    sltu t1, a5, a1
 ; RV32ZBB-NEXT:    j .LBB12_5
 ; RV32ZBB-NEXT:  .LBB12_4:
-; RV32ZBB-NEXT:    sltu t1, a4, a3
+; RV32ZBB-NEXT:    sltu t1, a3, a2
 ; RV32ZBB-NEXT:  .LBB12_5:
-; RV32ZBB-NEXT:    sub a7, t0, a7
-; RV32ZBB-NEXT:    sub a7, a7, t2
-; RV32ZBB-NEXT:    sub a5, a2, a5
-; RV32ZBB-NEXT:    beq a7, t0, .LBB12_7
+; RV32ZBB-NEXT:    sub a6, t0, a6
+; RV32ZBB-NEXT:    sub a6, a6, t2
+; RV32ZBB-NEXT:    sub t2, a7, a4
+; RV32ZBB-NEXT:    beq a6, t0, .LBB12_7
 ; RV32ZBB-NEXT:  # %bb.6:
-; RV32ZBB-NEXT:    sltu a2, t0, a7
+; RV32ZBB-NEXT:    sltu a4, t0, a6
 ; RV32ZBB-NEXT:    j .LBB12_8
 ; RV32ZBB-NEXT:  .LBB12_7:
-; RV32ZBB-NEXT:    sltu a2, a2, a5
+; RV32ZBB-NEXT:    sltu a4, a7, t2
 ; RV32ZBB-NEXT:  .LBB12_8:
-; RV32ZBB-NEXT:    xor a6, a1, a6
-; RV32ZBB-NEXT:    xor a4, a3, a4
-; RV32ZBB-NEXT:    or a4, a4, a6
-; RV32ZBB-NEXT:    beqz a4, .LBB12_10
+; RV32ZBB-NEXT:    xor a5, a1, a5
+; RV32ZBB-NEXT:    xor a3, a2, a3
+; RV32ZBB-NEXT:    or a3, a3, a5
+; RV32ZBB-NEXT:    beqz a3, .LBB12_10
 ; RV32ZBB-NEXT:  # %bb.9:
-; RV32ZBB-NEXT:    mv a2, t1
+; RV32ZBB-NEXT:    mv a4, t1
 ; RV32ZBB-NEXT:  .LBB12_10:
-; RV32ZBB-NEXT:    neg a4, a2
-; RV32ZBB-NEXT:    xor t0, a5, a4
-; RV32ZBB-NEXT:    xor t3, a7, a4
-; RV32ZBB-NEXT:    sltu a5, t0, a4
-; RV32ZBB-NEXT:    add a6, t3, a2
-; RV32ZBB-NEXT:    add t0, t0, a2
-; RV32ZBB-NEXT:    sub t1, a6, a5
-; RV32ZBB-NEXT:    snez a6, t1
-; RV32ZBB-NEXT:    snez t2, t0
-; RV32ZBB-NEXT:    or a6, t2, a6
-; RV32ZBB-NEXT:    beqz a7, .LBB12_12
+; RV32ZBB-NEXT:    neg a5, a4
+; RV32ZBB-NEXT:    xor t0, t2, a5
+; RV32ZBB-NEXT:    sltu a7, t0, a5
+; RV32ZBB-NEXT:    xor t1, a6, a5
+; RV32ZBB-NEXT:    mv a3, a7
+; RV32ZBB-NEXT:    beqz a6, .LBB12_12
 ; RV32ZBB-NEXT:  # %bb.11:
-; RV32ZBB-NEXT:    sltu a5, t3, a4
+; RV32ZBB-NEXT:    sltu a3, t1, a5
 ; RV32ZBB-NEXT:  .LBB12_12:
-; RV32ZBB-NEXT:    xor a3, a3, a4
-; RV32ZBB-NEXT:    xor a1, a1, a4
-; RV32ZBB-NEXT:    add t1, t1, t2
-; RV32ZBB-NEXT:    neg a7, t0
-; RV32ZBB-NEXT:    add t0, a3, a2
-; RV32ZBB-NEXT:    sltu a3, a3, a4
+; RV32ZBB-NEXT:    xor a2, a2, a5
+; RV32ZBB-NEXT:    add t1, t1, a4
+; RV32ZBB-NEXT:    add t0, t0, a4
+; RV32ZBB-NEXT:    xor a1, a1, a5
+; RV32ZBB-NEXT:    add a6, a2, a4
+; RV32ZBB-NEXT:    sub a7, t1, a7
+; RV32ZBB-NEXT:    sltu a2, a2, a5
+; RV32ZBB-NEXT:    add a1, a1, a4
+; RV32ZBB-NEXT:    snez a4, t0
+; RV32ZBB-NEXT:    neg a5, t0
+; RV32ZBB-NEXT:    sub t1, a6, a3
+; RV32ZBB-NEXT:    or t0, t0, a7
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    sltu a2, a6, a3
+; RV32ZBB-NEXT:    add a4, a7, a4
+; RV32ZBB-NEXT:    neg a3, t1
+; RV32ZBB-NEXT:    snez a6, t0
+; RV32ZBB-NEXT:    sub a1, a1, a2
+; RV32ZBB-NEXT:    snez a2, t1
+; RV32ZBB-NEXT:    neg a4, a4
+; RV32ZBB-NEXT:    sltu a7, a3, a6
 ; RV32ZBB-NEXT:    add a1, a1, a2
-; RV32ZBB-NEXT:    neg a2, t1
-; RV32ZBB-NEXT:    sub a4, t0, a5
-; RV32ZBB-NEXT:    sub a1, a1, a3
-; RV32ZBB-NEXT:    sltu a3, t0, a5
-; RV32ZBB-NEXT:    neg a5, a4
-; RV32ZBB-NEXT:    sub a1, a1, a3
-; RV32ZBB-NEXT:    snez a3, a4
-; RV32ZBB-NEXT:    sltu a4, a5, a6
-; RV32ZBB-NEXT:    add a1, a1, a3
-; RV32ZBB-NEXT:    sub a3, a5, a6
+; RV32ZBB-NEXT:    sub a2, a3, a6
 ; RV32ZBB-NEXT:    neg a1, a1
-; RV32ZBB-NEXT:    sub a1, a1, a4
-; RV32ZBB-NEXT:    sw a7, 0(a0)
-; RV32ZBB-NEXT:    sw a2, 4(a0)
-; RV32ZBB-NEXT:    sw a3, 8(a0)
+; RV32ZBB-NEXT:    sub a1, a1, a7
+; RV32ZBB-NEXT:    sw a5, 0(a0)
+; RV32ZBB-NEXT:    sw a4, 4(a0)
+; RV32ZBB-NEXT:    sw a2, 8(a0)
 ; RV32ZBB-NEXT:    sw a1, 12(a0)
 ; RV32ZBB-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index a724556e553d5..f4699ca7f1134 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -1091,24 +1091,23 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IF-NEXT:    mv a1, a0
 ; RV32IF-NEXT:    addi a0, sp, 8
 ; RV32IF-NEXT:    call __fixdfti
-; RV32IF-NEXT:    lw a3, 8(sp)
 ; RV32IF-NEXT:    lw a1, 12(sp)
-; RV32IF-NEXT:    lw a2, 16(sp)
-; RV32IF-NEXT:    lw a4, 20(sp)
-; RV32IF-NEXT:    lui a0, 524288
-; RV32IF-NEXT:    addi a5, a0, -1
+; RV32IF-NEXT:    lw a0, 16(sp)
+; RV32IF-NEXT:    lw a3, 20(sp)
+; RV32IF-NEXT:    lw a4, 8(sp)
+; RV32IF-NEXT:    lui a2, 524288
+; RV32IF-NEXT:    addi a5, a2, -1
+; RV32IF-NEXT:    or a7, a0, a3
 ; RV32IF-NEXT:    beq a1, a5, .LBB18_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
 ; RV32IF-NEXT:    sltu a6, a1, a5
-; RV32IF-NEXT:    or a7, a2, a4
 ; RV32IF-NEXT:    bnez a7, .LBB18_3
 ; RV32IF-NEXT:    j .LBB18_4
 ; RV32IF-NEXT:  .LBB18_2:
-; RV32IF-NEXT:    sltiu a6, a3, -1
-; RV32IF-NEXT:    or a7, a2, a4
+; RV32IF-NEXT:    sltiu a6, a4, -1
 ; RV32IF-NEXT:    beqz a7, .LBB18_4
 ; RV32IF-NEXT:  .LBB18_3: # %entry
-; RV32IF-NEXT:    srli a6, a4, 31
+; RV32IF-NEXT:    srli a6, a3, 31
 ; RV32IF-NEXT:  .LBB18_4: # %entry
 ; RV32IF-NEXT:    neg a7, a6
 ; RV32IF-NEXT:    addi t0, a6, -1
@@ -1116,29 +1115,29 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IF-NEXT:  # %bb.5: # %entry
 ; RV32IF-NEXT:    mv a1, a5
 ; RV32IF-NEXT:  .LBB18_6: # %entry
-; RV32IF-NEXT:    or a3, t0, a3
-; RV32IF-NEXT:    and a4, a7, a4
-; RV32IF-NEXT:    and a2, a7, a2
-; RV32IF-NEXT:    beq a1, a0, .LBB18_8
+; RV32IF-NEXT:    or a4, t0, a4
+; RV32IF-NEXT:    and a3, a7, a3
+; RV32IF-NEXT:    and a5, a7, a0
+; RV32IF-NEXT:    beq a1, a2, .LBB18_8
 ; RV32IF-NEXT:  # %bb.7: # %entry
-; RV32IF-NEXT:    sltu a0, a0, a1
+; RV32IF-NEXT:    sltu a0, a2, a1
 ; RV32IF-NEXT:    j .LBB18_9
 ; RV32IF-NEXT:  .LBB18_8:
-; RV32IF-NEXT:    snez a0, a3
+; RV32IF-NEXT:    snez a0, a4
 ; RV32IF-NEXT:  .LBB18_9: # %entry
-; RV32IF-NEXT:    and a2, a2, a4
-; RV32IF-NEXT:    li a5, -1
-; RV32IF-NEXT:    beq a2, a5, .LBB18_11
+; RV32IF-NEXT:    and a5, a5, a3
+; RV32IF-NEXT:    li a2, -1
+; RV32IF-NEXT:    beq a5, a2, .LBB18_11
 ; RV32IF-NEXT:  # %bb.10: # %entry
-; RV32IF-NEXT:    srli a4, a4, 31
-; RV32IF-NEXT:    xori a0, a4, 1
+; RV32IF-NEXT:    srli a3, a3, 31
+; RV32IF-NEXT:    xori a0, a3, 1
 ; RV32IF-NEXT:  .LBB18_11: # %entry
 ; RV32IF-NEXT:    bnez a0, .LBB18_13
 ; RV32IF-NEXT:  # %bb.12: # %entry
 ; RV32IF-NEXT:    lui a1, 524288
 ; RV32IF-NEXT:  .LBB18_13: # %entry
 ; RV32IF-NEXT:    neg a0, a0
-; RV32IF-NEXT:    and a0, a0, a3
+; RV32IF-NEXT:    and a0, a0, a4
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
@@ -1194,24 +1193,23 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    addi a0, sp, 8
 ; RV32IFD-NEXT:    call __fixdfti
-; RV32IFD-NEXT:    lw a3, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    lw a2, 16(sp)
-; RV32IFD-NEXT:    lw a4, 20(sp)
-; RV32IFD-NEXT:    lui a0, 524288
-; RV32IFD-NEXT:    addi a5, a0, -1
+; RV32IFD-NEXT:    lw a0, 16(sp)
+; RV32IFD-NEXT:    lw a3, 20(sp)
+; RV32IFD-NEXT:    lw a4, 8(sp)
+; RV32IFD-NEXT:    lui a2, 524288
+; RV32IFD-NEXT:    addi a5, a2, -1
+; RV32IFD-NEXT:    or a7, a0, a3
 ; RV32IFD-NEXT:    beq a1, a5, .LBB18_2
 ; RV32IFD-NEXT:  # %bb.1: # %entry
 ; RV32IFD-NEXT:    sltu a6, a1, a5
-; RV32IFD-NEXT:    or a7, a2, a4
 ; RV32IFD-NEXT:    bnez a7, .LBB18_3
 ; RV32IFD-NEXT:    j .LBB18_4
 ; RV32IFD-NEXT:  .LBB18_2:
-; RV32IFD-NEXT:    sltiu a6, a3, -1
-; RV32IFD-NEXT:    or a7, a2, a4
+; RV32IFD-NEXT:    sltiu a6, a4, -1
 ; RV32IFD-NEXT:    beqz a7, .LBB18_4
 ; RV32IFD-NEXT:  .LBB18_3: # %entry
-; RV32IFD-NEXT:    srli a6, a4, 31
+; RV32IFD-NEXT:    srli a6, a3, 31
 ; RV32IFD-NEXT:  .LBB18_4: # %entry
 ; RV32IFD-NEXT:    neg a7, a6
 ; RV32IFD-NEXT:    addi t0, a6, -1
@@ -1219,29 +1217,29 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IFD-NEXT:  # %bb.5: # %entry
 ; RV32IFD-NEXT:    mv a1, a5
 ; RV32IFD-NEXT:  .LBB18_6: # %entry
-; RV32IFD-NEXT:    or a3, t0, a3
-; RV32IFD-NEXT:    and a4, a7, a4
-; RV32IFD-NEXT:    and a2, a7, a2
-; RV32IFD-NEXT:    beq a1, a0, .LBB18_8
+; RV32IFD-NEXT:    or a4, t0, a4
+; RV32IFD-NEXT:    and a3, a7, a3
+; RV32IFD-NEXT:    and a5, a7, a0
+; RV32IFD-NEXT:    beq a1, a2, .LBB18_8
 ; RV32IFD-NEXT:  # %bb.7: # %entry
-; RV32IFD-NEXT:    sltu a0, a0, a1
+; RV32IFD-NEXT:    sltu a0, a2, a1
 ; RV32IFD-NEXT:    j .LBB18_9
 ; RV32IFD-NEXT:  .LBB18_8:
-; RV32IFD-NEXT:    snez a0, a3
+; RV32IFD-NEXT:    snez a0, a4
 ; RV32IFD-NEXT:  .LBB18_9: # %entry
-; RV32IFD-NEXT:    and a2, a2, a4
-; RV32IFD-NEXT:    li a5, -1
-; RV32IFD-NEXT:    beq a2, a5, .LBB18_11
+; RV32IFD-NEXT:    and a5, a5, a3
+; RV32IFD-NEXT:    li a2, -1
+; RV32IFD-NEXT:    beq a5, a2, .LBB18_11
 ; RV32IFD-NEXT:  # %bb.10: # %entry
-; RV32IFD-NEXT:    srli a4, a4, 31
-; RV32IFD-NEXT:    xori a0, a4, 1
+; RV32IFD-NEXT:    srli a3, a3, 31
+; RV32IFD-NEXT:    xori a0, a3, 1
 ; RV32IFD-NEXT:  .LBB18_11: # %entry
 ; RV32IFD-NEXT:    bnez a0, .LBB18_13
 ; RV32IFD-NEXT:  # %bb.12: # %entry
 ; RV32IFD-NEXT:    lui a1, 524288
 ; RV32IFD-NEXT:  .LBB18_13: # %entry
 ; RV32IFD-NEXT:    neg a0, a0
-; RV32IFD-NEXT:    and a0, a0, a3
+; RV32IFD-NEXT:    and a0, a0, a4
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
@@ -1378,8 +1376,8 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IF-NEXT:  # %bb.4: # %entry
 ; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:  .LBB20_5: # %entry
-; RV32IF-NEXT:    lw a4, 8(sp)
-; RV32IF-NEXT:    lw a3, 12(sp)
+; RV32IF-NEXT:    lw a3, 8(sp)
+; RV32IF-NEXT:    lw a4, 12(sp)
 ; RV32IF-NEXT:    and a5, a2, a1
 ; RV32IF-NEXT:    beqz a5, .LBB20_7
 ; RV32IF-NEXT:  # %bb.6: # %entry
@@ -1393,12 +1391,12 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IF-NEXT:    and a2, a2, a3
 ; RV32IF-NEXT:    bnez a0, .LBB20_10
 ; RV32IF-NEXT:  # %bb.9:
-; RV32IF-NEXT:    or a0, a4, a2
+; RV32IF-NEXT:    or a0, a2, a4
 ; RV32IF-NEXT:    snez a1, a0
 ; RV32IF-NEXT:  .LBB20_10: # %entry
 ; RV32IF-NEXT:    neg a1, a1
-; RV32IF-NEXT:    and a0, a1, a4
-; RV32IF-NEXT:    and a1, a1, a2
+; RV32IF-NEXT:    and a0, a1, a2
+; RV32IF-NEXT:    and a1, a1, a4
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
@@ -1461,8 +1459,8 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IFD-NEXT:  # %bb.4: # %entry
 ; RV32IFD-NEXT:    li a0, 1
 ; RV32IFD-NEXT:  .LBB20_5: # %entry
-; RV32IFD-NEXT:    lw a4, 8(sp)
-; RV32IFD-NEXT:    lw a3, 12(sp)
+; RV32IFD-NEXT:    lw a3, 8(sp)
+; RV32IFD-NEXT:    lw a4, 12(sp)
 ; RV32IFD-NEXT:    and a5, a2, a1
 ; RV32IFD-NEXT:    beqz a5, .LBB20_7
 ; RV32IFD-NEXT:  # %bb.6: # %entry
@@ -1476,12 +1474,12 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IFD-NEXT:    and a2, a2, a3
 ; RV32IFD-NEXT:    bnez a0, .LBB20_10
 ; RV32IFD-NEXT:  # %bb.9:
-; RV32IFD-NEXT:    or a0, a4, a2
+; RV32IFD-NEXT:    or a0, a2, a4
 ; RV32IFD-NEXT:    snez a1, a0
 ; RV32IFD-NEXT:  .LBB20_10: # %entry
 ; RV32IFD-NEXT:    neg a1, a1
-; RV32IFD-NEXT:    and a0, a1, a4
-; RV32IFD-NEXT:    and a1, a1, a2
+; RV32IFD-NEXT:    and a0, a1, a2
+; RV32IFD-NEXT:    and a1, a1, a4
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
@@ -1506,24 +1504,23 @@ define i64 @stest_f32i64(float %x) {
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    call __fixsfti
-; RV32-NEXT:    lw a3, 8(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
-; RV32-NEXT:    lw a2, 16(sp)
-; RV32-NEXT:    lw a4, 20(sp)
-; RV32-NEXT:    lui a0, 524288
-; RV32-NEXT:    addi a5, a0, -1
+; RV32-NEXT:    lw a0, 16(sp)
+; RV32-NEXT:    lw a3, 20(sp)
+; RV32-NEXT:    lw a4, 8(sp)
+; RV32-NEXT:    lui a2, 524288
+; RV32-NEXT:    addi a5, a2, -1
+; RV32-NEXT:    or a7, a0, a3
 ; RV32-NEXT:    beq a1, a5, .LBB21_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a6, a1, a5
-; RV32-NEXT:    or a7, a2, a4
 ; RV32-NEXT:    bnez a7, .LBB21_3
 ; RV32-NEXT:    j .LBB21_4
 ; RV32-NEXT:  .LBB21_2:
-; RV32-NEXT:    sltiu a6, a3, -1
-; RV32-NEXT:    or a7, a2, a4
+; RV32-NEXT:    sltiu a6, a4, -1
 ; RV32-NEXT:    beqz a7, .LBB21_4
 ; RV32-NEXT:  .LBB21_3: # %entry
-; RV32-NEXT:    srli a6, a4, 31
+; RV32-NEXT:    srli a6, a3, 31
 ; RV32-NEXT:  .LBB21_4: # %entry
 ; RV32-NEXT:    neg a7, a6
 ; RV32-NEXT:    addi t0, a6, -1
@@ -1531,29 +1528,29 @@ define i64 @stest_f32i64(float %x) {
 ; RV32-NEXT:  # %bb.5: # %entry
 ; RV32-NEXT:    mv a1, a5
 ; RV32-NEXT:  .LBB21_6: # %entry
-; RV32-NEXT:    or a3, t0, a3
-; RV32-NEXT:    and a4, a7, a4
-; RV32-NEXT:    and a2, a7, a2
-; RV32-NEXT:    beq a1, a0, .LBB21_8
+; RV32-NEXT:    or a4, t0, a4
+; RV32-NEXT:    and a3, a7, a3
+; RV32-NEXT:    and a5, a7, a0
+; RV32-NEXT:    beq a1, a2, .LBB21_8
 ; RV32-NEXT:  # %bb.7: # %entry
-; RV32-NEXT:    sltu a0, a0, a1
+; RV32-NEXT:    sltu a0, a2, a1
 ; RV32-NEXT:    j .LBB21_9
 ; RV32-NEXT:  .LBB21_8:
-; RV32-NEXT:    snez a0, a3
+; RV32-NEXT:    snez a0, a4
 ; RV32-NEXT:  .LBB21_9: # %entry
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    li a5, -1
-; RV32-NEXT:    beq a2, a5, .LBB21_11
+; RV32-NEXT:    and a5, a5, a3
+; RV32-NEXT:    li a2, -1
+; RV32-NEXT:    beq a5, a2, .LBB21_11
 ; RV32-NEXT:  # %bb.10: # %entry
-; RV32-NEXT:    srli a4, a4, 31
-; RV32-NEXT:    xori a0, a4, 1
+; RV32-NEXT:    srli a3, a3, 31
+; RV32-NEXT:    xori a0, a3, 1
 ; RV32-NEXT:  .LBB21_11: # %entry
 ; RV32-NEXT:    bnez a0, .LBB21_13
 ; RV32-NEXT:  # %bb.12: # %entry
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:  .LBB21_13: # %entry
 ; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a0, a0, a3
+; RV32-NEXT:    and a0, a0, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
@@ -1658,8 +1655,8 @@ define i64 @ustest_f32i64(float %x) {
 ; RV32-NEXT:  # %bb.4: # %entry
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:  .LBB23_5: # %entry
-; RV32-NEXT:    lw a4, 8(sp)
-; RV32-NEXT:    lw a3, 12(sp)
+; RV32-NEXT:    lw a3, 8(sp)
+; RV32-NEXT:    lw a4, 12(sp)
 ; RV32-NEXT:    and a5, a2, a1
 ; RV32-NEXT:    beqz a5, .LBB23_7
 ; RV32-NEXT:  # %bb.6: # %entry
@@ -1673,12 +1670,12 @@ define i64 @ustest_f32i64(float %x) {
 ; RV32-NEXT:    and a2, a2, a3
 ; RV32-NEXT:    bnez a0, .LBB23_10
 ; RV32-NEXT:  # %bb.9:
-; RV32-NEXT:    or a0, a4, a2
+; RV32-NEXT:    or a0, a2, a4
 ; RV32-NEXT:    snez a1, a0
 ; RV32-NEXT:  .LBB23_10: # %entry
 ; RV32-NEXT:    neg a1, a1
-; RV32-NEXT:    and a0, a1, a4
-; RV32-NEXT:    and a1, a1, a2
+; RV32-NEXT:    and a0, a1, a2
+; RV32-NEXT:    and a1, a1, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
@@ -1733,24 +1730,23 @@ define i64 @stest_f16i64(half %x) {
 ; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    call __fixsfti
-; RV32-NEXT:    lw a3, 8(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
-; RV32-NEXT:    lw a2, 16(sp)
-; RV32-NEXT:    lw a4, 20(sp)
-; RV32-NEXT:    lui a0, 524288
-; RV32-NEXT:    addi a5, a0, -1
+; RV32-NEXT:    lw a0, 16(sp)
+; RV32-NEXT:    lw a3, 20(sp)
+; RV32-NEXT:    lw a4, 8(sp)
+; RV32-NEXT:    lui a2, 524288
+; RV32-NEXT:    addi a5, a2, -1
+; RV32-NEXT:    or a7, a0, a3
 ; RV32-NEXT:    beq a1, a5, .LBB24_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a6, a1, a5
-; RV32-NEXT:    or a7, a2, a4
 ; RV32-NEXT:    bnez a7, .LBB24_3
 ; RV32-NEXT:    j .LBB24_4
 ; RV32-NEXT:  .LBB24_2:
-; RV32-NEXT:    sltiu a6, a3, -1
-; RV32-NEXT:    or a7, a2, a4
+; RV32-NEXT:    sltiu a6, a4, -1
 ; RV32-NEXT:    beqz a7, .LBB24_4
 ; RV32-NEXT:  .LBB24_3: # %entry
-; RV32-NEXT:    srli a6, a4, 31
+; RV32-NEXT:    srli a6, a3, 31
 ; RV32-NEXT:  .LBB24_4: # %entry
 ; RV32-NEXT:    neg a7, a6
 ; RV32-NEXT:    addi t0, a6, -1
@@ -1758,29 +1754,29 @@ define i64 @stest_f16i64(half %x) {
 ; RV32-NEXT:  # %bb.5: # %entry
 ; RV32-NEXT:    mv a1, a5
 ; RV32-NEXT:  .LBB24_6: # %entry
-; RV32-NEXT:    or a3, t0, a3
-; RV32-NEXT:    and a4, a7, a4
-; RV32-NEXT:    and a2, a7, a2
-; RV32-NEXT:    beq a1, a0, .LBB24_8
+; RV32-NEXT:    or a4, t0, a4
+; RV32-NEXT:    and a3, a7, a3
+; RV32-NEXT:    and a5, a7, a0
+; RV32-NEXT:    beq a1, a2, .LBB24_8
 ; RV32-NEXT:  # %bb.7: # %entry
-; RV32-NEXT:    sltu a0, a0, a1
+; RV32-NEXT:    sltu a0, a2, a1
 ; RV32-NEXT:    j .LBB24_9
 ; RV32-NEXT:  .LBB24_8:
-; RV32-NEXT:    snez a0, a3
+; RV32-NEXT:    snez a0, a4
 ; RV32-NEXT:  .LBB24_9: # %entry
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    li a5, -1
-; RV32-NEXT:    beq a2, a5, .LBB24_11
+; RV32-NEXT:    and a5, a5, a3
+; RV32-NEXT:    li a2, -1
+; RV32-NEXT:    beq a5, a2, .LBB24_11
 ; RV32-NEXT:  # %bb.10: # %entry
-; RV32-NEXT:    srli a4, a4, 31
-; RV32-NEXT:    xori a0, a4, 1
+; RV32-NEXT:    srli a3, a3, 31
+; RV32-NEXT:    xori a0, a3, 1
 ; RV32-NEXT:  .LBB24_11: # %entry
 ; RV32-NEXT:    bnez a0, .LBB24_13
 ; RV32-NEXT:  # %bb.12: # %entry
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:  .LBB24_13: # %entry
 ; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a0, a0, a3
+; RV32-NEXT:    and a0, a0, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
@@ -1921,8 +1917,8 @@ define i64 @ustest_f16i64(half %x) {
 ; RV32-NEXT:  # %bb.4: # %entry
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:  .LBB26_5: # %entry
-; RV32-NEXT:    lw a4, 8(sp)
-; RV32-NEXT:    lw a3, 12(sp)
+; RV32-NEXT:    lw a3, 8(sp)
+; RV32-NEXT:    lw a4, 12(sp)
 ; RV32-NEXT:    and a5, a2, a1
 ; RV32-NEXT:    beqz a5, .LBB26_7
 ; RV32-NEXT:  # %bb.6: # %entry
@@ -1936,12 +1932,12 @@ define i64 @ustest_f16i64(half %x) {
 ; RV32-NEXT:    and a2, a2, a3
 ; RV32-NEXT:    bnez a0, .LBB26_10
 ; RV32-NEXT:  # %bb.9:
-; RV32-NEXT:    or a0, a4, a2
+; RV32-NEXT:    or a0, a2, a4
 ; RV32-NEXT:    snez a1, a0
 ; RV32-NEXT:  .LBB26_10: # %entry
 ; RV32-NEXT:    neg a1, a1
-; RV32-NEXT:    and a0, a1, a4
-; RV32-NEXT:    and a1, a1, a2
+; RV32-NEXT:    and a0, a1, a2
+; RV32-NEXT:    and a1, a1, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
@@ -3024,24 +3020,23 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    mv a1, a0
 ; RV32IF-NEXT:    addi a0, sp, 8
 ; RV32IF-NEXT:    call __fixdfti
-; RV32IF-NEXT:    lw a3, 8(sp)
 ; RV32IF-NEXT:    lw a1, 12(sp)
-; RV32IF-NEXT:    lw a2, 16(sp)
-; RV32IF-NEXT:    lw a4, 20(sp)
-; RV32IF-NEXT:    lui a0, 524288
-; RV32IF-NEXT:    addi a5, a0, -1
+; RV32IF-NEXT:    lw a0, 16(sp)
+; RV32IF-NEXT:    lw a3, 20(sp)
+; RV32IF-NEXT:    lw a4, 8(sp)
+; RV32IF-NEXT:    lui a2, 524288
+; RV32IF-NEXT:    addi a5, a2, -1
+; RV32IF-NEXT:    or a7, a0, a3
 ; RV32IF-NEXT:    beq a1, a5, .LBB45_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
 ; RV32IF-NEXT:    sltu a6, a1, a5
-; RV32IF-NEXT:    or a7, a2, a4
 ; RV32IF-NEXT:    bnez a7, .LBB45_3
 ; RV32IF-NEXT:    j .LBB45_4
 ; RV32IF-NEXT:  .LBB45_2:
-; RV32IF-NEXT:    sltiu a6, a3, -1
-; RV32IF-NEXT:    or a7, a2, a4
+; RV32IF-NEXT:    sltiu a6, a4, -1
 ; RV32IF-NEXT:    beqz a7, .LBB45_4
 ; RV32IF-NEXT:  .LBB45_3: # %entry
-; RV32IF-NEXT:    srli a6, a4, 31
+; RV32IF-NEXT:    srli a6, a3, 31
 ; RV32IF-NEXT:  .LBB45_4: # %entry
 ; RV32IF-NEXT:    neg a7, a6
 ; RV32IF-NEXT:    addi t0, a6, -1
@@ -3049,29 +3044,29 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:  # %bb.5: # %entry
 ; RV32IF-NEXT:    mv a1, a5
 ; RV32IF-NEXT:  .LBB45_6: # %entry
-; RV32IF-NEXT:    or a3, t0, a3
-; RV32IF-NEXT:    and a4, a7, a4
-; RV32IF-NEXT:    and a2, a7, a2
-; RV32IF-NEXT:    beq a1, a0, .LBB45_8
+; RV32IF-NEXT:    or a4, t0, a4
+; RV32IF-NEXT:    and a3, a7, a3
+; RV32IF-NEXT:    and a5, a7, a0
+; RV32IF-NEXT:    beq a1, a2, .LBB45_8
 ; RV32IF-NEXT:  # %bb.7: # %entry
-; RV32IF-NEXT:    sltu a0, a0, a1
+; RV32IF-NEXT:    sltu a0, a2, a1
 ; RV32IF-NEXT:    j .LBB45_9
 ; RV32IF-NEXT:  .LBB45_8:
-; RV32IF-NEXT:    snez a0, a3
+; RV32IF-NEXT:    snez a0, a4
 ; RV32IF-NEXT:  .LBB45_9: # %entry
-; RV32IF-NEXT:    and a2, a2, a4
-; RV32IF-NEXT:    li a5, -1
-; RV32IF-NEXT:    beq a2, a5, .LBB45_11
+; RV32IF-NEXT:    and a5, a5, a3
+; RV32IF-NEXT:    li a2, -1
+; RV32IF-NEXT:    beq a5, a2, .LBB45_11
 ; RV32IF-NEXT:  # %bb.10: # %entry
-; RV32IF-NEXT:    srli a4, a4, 31
-; RV32IF-NEXT:    xori a0, a4, 1
+; RV32IF-NEXT:    srli a3, a3, 31
+; RV32IF-NEXT:    xori a0, a3, 1
 ; RV32IF-NEXT:  .LBB45_11: # %entry
 ; RV32IF-NEXT:    bnez a0, .LBB45_13
 ; RV32IF-NEXT:  # %bb.12: # %entry
 ; RV32IF-NEXT:    lui a1, 524288
 ; RV32IF-NEXT:  .LBB45_13: # %entry
 ; RV32IF-NEXT:    neg a0, a0
-; RV32IF-NEXT:    and a0, a0, a3
+; RV32IF-NEXT:    and a0, a0, a4
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    .cfi_restore ra
 ; RV32IF-NEXT:    addi sp, sp, 32
@@ -3127,24 +3122,23 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    .cfi_offset ra, -4
 ; RV32IFD-NEXT:    addi a0, sp, 8
 ; RV32IFD-NEXT:    call __fixdfti
-; RV32IFD-NEXT:    lw a3, 8(sp)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    lw a2, 16(sp)
-; RV32IFD-NEXT:    lw a4, 20(sp)
-; RV32IFD-NEXT:    lui a0, 524288
-; RV32IFD-NEXT:    addi a5, a0, -1
+; RV32IFD-NEXT:    lw a0, 16(sp)
+; RV32IFD-NEXT:    lw a3, 20(sp)
+; RV32IFD-NEXT:    lw a4, 8(sp)
+; RV32IFD-NEXT:    lui a2, 524288
+; RV32IFD-NEXT:    addi a5, a2, -1
+; RV32IFD-NEXT:    or a7, a0, a3
 ; RV32IFD-NEXT:    beq a1, a5, .LBB45_2
 ; RV32IFD-NEXT:  # %bb.1: # %entry
 ; RV32IFD-NEXT:    sltu a6, a1, a5
-; RV32IFD-NEXT:    or a7, a2, a4
 ; RV32IFD-NEXT:    bnez a7, .LBB45_3
 ; RV32IFD-NEXT:    j .LBB45_4
 ; RV32IFD-NEXT:  .LBB45_2:
-; RV32IFD-NEXT:    sltiu a6, a3, -1
-; RV32IFD-NEXT:    or a7, a2, a4
+; RV32IFD-NEXT:    sltiu a6, a4, -1
 ; RV32IFD-NEXT:    beqz a7, .LBB45_4
 ; RV32IFD-NEXT:  .LBB45_3: # %entry
-; RV32IFD-NEXT:    srli a6, a4, 31
+; RV32IFD-NEXT:    srli a6, a3, 31
 ; RV32IFD-NEXT:  .LBB45_4: # %entry
 ; RV32IFD-NEXT:    neg a7, a6
 ; RV32IFD-NEXT:    addi t0, a6, -1
@@ -3152,29 +3146,29 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:  # %bb.5: # %entry
 ; RV32IFD-NEXT:    mv a1, a5
 ; RV32IFD-NEXT:  .LBB45_6: # %entry
-; RV32IFD-NEXT:    or a3, t0, a3
-; RV32IFD-NEXT:    and a4, a7, a4
-; RV32IFD-NEXT:    and a2, a7, a2
-; RV32IFD-NEXT:    beq a1, a0, .LBB45_8
+; RV32IFD-NEXT:    or a4, t0, a4
+; RV32IFD-NEXT:    and a3, a7, a3
+; RV32IFD-NEXT:    and a5, a7, a0
+; RV32IFD-NEXT:    beq a1, a2, .LBB45_8
 ; RV32IFD-NEXT:  # %bb.7: # %entry
-; RV32IFD-NEXT:    sltu a0, a0, a1
+; RV32IFD-NEXT:    sltu a0, a2, a1
 ; RV32IFD-NEXT:    j .LBB45_9
 ; RV32IFD-NEXT:  .LBB45_8:
-; RV32IFD-NEXT:    snez a0, a3
+; RV32IFD-NEXT:    snez a0, a4
 ; RV32IFD-NEXT:  .LBB45_9: # %entry
-; RV32IFD-NEXT:    and a2, a2, a4
-; RV32IFD-NEXT:    li a5, -1
-; RV32IFD-NEXT:    beq a2, a5, .LBB45_11
+; RV32IFD-NEXT:    and a5, a5, a3
+; RV32IFD-NEXT:    li a2, -1
+; RV32IFD-NEXT:    beq a5, a2, .LBB45_11
 ; RV32IFD-NEXT:  # %bb.10: # %entry
-; RV32IFD-NEXT:    srli a4, a4, 31
-; RV32IFD-NEXT:    xori a0, a4, 1
+; RV32IFD-NEXT:    srli a3, a3, 31
+; RV32IFD-NEXT:    xori a0, a3, 1
 ; RV32IFD-NEXT:  .LBB45_11: # %entry
 ; RV32IFD-NEXT:    bnez a0, .LBB45_13
 ; RV32IFD-NEXT:  # %bb.12: # %entry
 ; RV32IFD-NEXT:    lui a1, 524288
 ; RV32IFD-NEXT:  .LBB45_13: # %entry
 ; RV32IFD-NEXT:    neg a0, a0
-; RV32IFD-NEXT:    and a0, a0, a3
+; RV32IFD-NEXT:    and a0, a0, a4
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    .cfi_restore ra
 ; RV32IFD-NEXT:    addi sp, sp, 32
@@ -3397,24 +3391,23 @@ define i64 @stest_f32i64_mm(float %x) {
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    call __fixsfti
-; RV32-NEXT:    lw a3, 8(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
-; RV32-NEXT:    lw a2, 16(sp)
-; RV32-NEXT:    lw a4, 20(sp)
-; RV32-NEXT:    lui a0, 524288
-; RV32-NEXT:    addi a5, a0, -1
+; RV32-NEXT:    lw a0, 16(sp)
+; RV32-NEXT:    lw a3, 20(sp)
+; RV32-NEXT:    lw a4, 8(sp)
+; RV32-NEXT:    lui a2, 524288
+; RV32-NEXT:    addi a5, a2, -1
+; RV32-NEXT:    or a7, a0, a3
 ; RV32-NEXT:    beq a1, a5, .LBB48_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a6, a1, a5
-; RV32-NEXT:    or a7, a2, a4
 ; RV32-NEXT:    bnez a7, .LBB48_3
 ; RV32-NEXT:    j .LBB48_4
 ; RV32-NEXT:  .LBB48_2:
-; RV32-NEXT:    sltiu a6, a3, -1
-; RV32-NEXT:    or a7, a2, a4
+; RV32-NEXT:    sltiu a6, a4, -1
 ; RV32-NEXT:    beqz a7, .LBB48_4
 ; RV32-NEXT:  .LBB48_3: # %entry
-; RV32-NEXT:    srli a6, a4, 31
+; RV32-NEXT:    srli a6, a3, 31
 ; RV32-NEXT:  .LBB48_4: # %entry
 ; RV32-NEXT:    neg a7, a6
 ; RV32-NEXT:    addi t0, a6, -1
@@ -3422,29 +3415,29 @@ define i64 @stest_f32i64_mm(float %x) {
 ; RV32-NEXT:  # %bb.5: # %entry
 ; RV32-NEXT:    mv a1, a5
 ; RV32-NEXT:  .LBB48_6: # %entry
-; RV32-NEXT:    or a3, t0, a3
-; RV32-NEXT:    and a4, a7, a4
-; RV32-NEXT:    and a2, a7, a2
-; RV32-NEXT:    beq a1, a0, .LBB48_8
+; RV32-NEXT:    or a4, t0, a4
+; RV32-NEXT:    and a3, a7, a3
+; RV32-NEXT:    and a5, a7, a0
+; RV32-NEXT:    beq a1, a2, .LBB48_8
 ; RV32-NEXT:  # %bb.7: # %entry
-; RV32-NEXT:    sltu a0, a0, a1
+; RV32-NEXT:    sltu a0, a2, a1
 ; RV32-NEXT:    j .LBB48_9
 ; RV32-NEXT:  .LBB48_8:
-; RV32-NEXT:    snez a0, a3
+; RV32-NEXT:    snez a0, a4
 ; RV32-NEXT:  .LBB48_9: # %entry
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    li a5, -1
-; RV32-NEXT:    beq a2, a5, .LBB48_11
+; RV32-NEXT:    and a5, a5, a3
+; RV32-NEXT:    li a2, -1
+; RV32-NEXT:    beq a5, a2, .LBB48_11
 ; RV32-NEXT:  # %bb.10: # %entry
-; RV32-NEXT:    srli a4, a4, 31
-; RV32-NEXT:    xori a0, a4, 1
+; RV32-NEXT:    srli a3, a3, 31
+; RV32-NEXT:    xori a0, a3, 1
 ; RV32-NEXT:  .LBB48_11: # %entry
 ; RV32-NEXT:    bnez a0, .LBB48_13
 ; RV32-NEXT:  # %bb.12: # %entry
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:  .LBB48_13: # %entry
 ; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a0, a0, a3
+; RV32-NEXT:    and a0, a0, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
@@ -3598,24 +3591,23 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV32-NEXT:    call __extendhfsf2
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    call __fixsfti
-; RV32-NEXT:    lw a3, 8(sp)
 ; RV32-NEXT:    lw a1, 12(sp)
-; RV32-NEXT:    lw a2, 16(sp)
-; RV32-NEXT:    lw a4, 20(sp)
-; RV32-NEXT:    lui a0, 524288
-; RV32-NEXT:    addi a5, a0, -1
+; RV32-NEXT:    lw a0, 16(sp)
+; RV32-NEXT:    lw a3, 20(sp)
+; RV32-NEXT:    lw a4, 8(sp)
+; RV32-NEXT:    lui a2, 524288
+; RV32-NEXT:    addi a5, a2, -1
+; RV32-NEXT:    or a7, a0, a3
 ; RV32-NEXT:    beq a1, a5, .LBB51_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    sltu a6, a1, a5
-; RV32-NEXT:    or a7, a2, a4
 ; RV32-NEXT:    bnez a7, .LBB51_3
 ; RV32-NEXT:    j .LBB51_4
 ; RV32-NEXT:  .LBB51_2:
-; RV32-NEXT:    sltiu a6, a3, -1
-; RV32-NEXT:    or a7, a2, a4
+; RV32-NEXT:    sltiu a6, a4, -1
 ; RV32-NEXT:    beqz a7, .LBB51_4
 ; RV32-NEXT:  .LBB51_3: # %entry
-; RV32-NEXT:    srli a6, a4, 31
+; RV32-NEXT:    srli a6, a3, 31
 ; RV32-NEXT:  .LBB51_4: # %entry
 ; RV32-NEXT:    neg a7, a6
 ; RV32-NEXT:    addi t0, a6, -1
@@ -3623,29 +3615,29 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV32-NEXT:  # %bb.5: # %entry
 ; RV32-NEXT:    mv a1, a5
 ; RV32-NEXT:  .LBB51_6: # %entry
-; RV32-NEXT:    or a3, t0, a3
-; RV32-NEXT:    and a4, a7, a4
-; RV32-NEXT:    and a2, a7, a2
-; RV32-NEXT:    beq a1, a0, .LBB51_8
+; RV32-NEXT:    or a4, t0, a4
+; RV32-NEXT:    and a3, a7, a3
+; RV32-NEXT:    and a5, a7, a0
+; RV32-NEXT:    beq a1, a2, .LBB51_8
 ; RV32-NEXT:  # %bb.7: # %entry
-; RV32-NEXT:    sltu a0, a0, a1
+; RV32-NEXT:    sltu a0, a2, a1
 ; RV32-NEXT:    j .LBB51_9
 ; RV32-NEXT:  .LBB51_8:
-; RV32-NEXT:    snez a0, a3
+; RV32-NEXT:    snez a0, a4
 ; RV32-NEXT:  .LBB51_9: # %entry
-; RV32-NEXT:    and a2, a2, a4
-; RV32-NEXT:    li a5, -1
-; RV32-NEXT:    beq a2, a5, .LBB51_11
+; RV32-NEXT:    and a5, a5, a3
+; RV32-NEXT:    li a2, -1
+; RV32-NEXT:    beq a5, a2, .LBB51_11
 ; RV32-NEXT:  # %bb.10: # %entry
-; RV32-NEXT:    srli a4, a4, 31
-; RV32-NEXT:    xori a0, a4, 1
+; RV32-NEXT:    srli a3, a3, 31
+; RV32-NEXT:    xori a0, a3, 1
 ; RV32-NEXT:  .LBB51_11: # %entry
 ; RV32-NEXT:    bnez a0, .LBB51_13
 ; RV32-NEXT:  # %bb.12: # %entry
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:  .LBB51_13: # %entry
 ; RV32-NEXT:    neg a0, a0
-; RV32-NEXT:    and a0, a0, a3
+; RV32-NEXT:    and a0, a0, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    .cfi_restore ra
 ; RV32-NEXT:    addi sp, sp, 32
diff --git a/llvm/test/CodeGen/RISCV/iabs.ll b/llvm/test/CodeGen/RISCV/iabs.ll
index 35ff8bece9b5d..2f70744632d22 100644
--- a/llvm/test/CodeGen/RISCV/iabs.ll
+++ b/llvm/test/CodeGen/RISCV/iabs.ll
@@ -302,11 +302,11 @@ define i128 @abs128(i128 %x) {
 ; RV32I-NEXT:    bgez a2, .LBB8_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    neg a5, a1
-; RV32I-NEXT:    snez a6, a4
-; RV32I-NEXT:    snez a7, a3
+; RV32I-NEXT:    or a6, a3, a4
 ; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    snez a7, a3
 ; RV32I-NEXT:    neg a4, a4
-; RV32I-NEXT:    or a6, a7, a6
+; RV32I-NEXT:    snez a6, a6
 ; RV32I-NEXT:    add a1, a2, a1
 ; RV32I-NEXT:    sub a4, a4, a7
 ; RV32I-NEXT:    sltu a2, a5, a6
@@ -330,11 +330,11 @@ define i128 @abs128(i128 %x) {
 ; RV32ZBB-NEXT:    bgez a2, .LBB8_2
 ; RV32ZBB-NEXT:  # %bb.1:
 ; RV32ZBB-NEXT:    neg a5, a1
-; RV32ZBB-NEXT:    snez a6, a4
-; RV32ZBB-NEXT:    snez a7, a3
+; RV32ZBB-NEXT:    or a6, a3, a4
 ; RV32ZBB-NEXT:    snez a1, a1
+; RV32ZBB-NEXT:    snez a7, a3
 ; RV32ZBB-NEXT:    neg a4, a4
-; RV32ZBB-NEXT:    or a6, a7, a6
+; RV32ZBB-NEXT:    snez a6, a6
 ; RV32ZBB-NEXT:    add a1, a2, a1
 ; RV32ZBB-NEXT:    sub a4, a4, a7
 ; RV32ZBB-NEXT:    sltu a2, a5, a6
@@ -384,11 +384,11 @@ define i128 @select_abs128(i128 %x) {
 ; RV32I-NEXT:    bgez a2, .LBB9_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    neg a5, a1
-; RV32I-NEXT:    snez a6, a4
-; RV32I-NEXT:    snez a7, a3
+; RV32I-NEXT:    or a6, a3, a4
 ; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    snez a7, a3
 ; RV32I-NEXT:    neg a4, a4
-; RV32I-NEXT:    or a6, a7, a6
+; RV32I-NEXT:    snez a6, a6
 ; RV32I-NEXT:    add a1, a2, a1
 ; RV32I-NEXT:    sub a4, a4, a7
 ; RV32I-NEXT:    sltu a2, a5, a6
@@ -412,11 +412,11 @@ define i128 @select_abs128(i128 %x) {
 ; RV32ZBB-NEXT:    bgez a2, .LBB9_2
 ; RV32ZBB-NEXT:  # %bb.1:
 ; RV32ZBB-NEXT:    neg a5, a1
-; RV32ZBB-NEXT:    snez a6, a4
-; RV32ZBB-NEXT:    snez a7, a3
+; RV32ZBB-NEXT:    or a6, a3, a4
 ; RV32ZBB-NEXT:    snez a1, a1
+; RV32ZBB-NEXT:    snez a7, a3
 ; RV32ZBB-NEXT:    neg a4, a4
-; RV32ZBB-NEXT:    or a6, a7, a6
+; RV32ZBB-NEXT:    snez a6, a6
 ; RV32ZBB-NEXT:    add a1, a2, a1
 ; RV32ZBB-NEXT:    sub a4, a4, a7
 ; RV32ZBB-NEXT:    sltu a2, a5, a6
diff --git a/llvm/test/CodeGen/RISCV/idiv_large.ll b/llvm/test/CodeGen/RISCV/idiv_large.ll
index c75a807621b6c..2d67c05ac63f6 100644
--- a/llvm/test/CodeGen/RISCV/idiv_large.ll
+++ b/llvm/test/CodeGen/RISCV/idiv_large.ll
@@ -1032,26 +1032,26 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    sw s9, 196(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s10, 192(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s11, 188(sp) # 4-byte Folded Spill
-; RV32-NEXT:    mv s8, a0
-; RV32-NEXT:    lw t2, 16(a2)
+; RV32-NEXT:    mv s6, a0
+; RV32-NEXT:    lw t1, 16(a2)
 ; RV32-NEXT:    lw a4, 0(a2)
 ; RV32-NEXT:    lw a5, 4(a2)
 ; RV32-NEXT:    lw a6, 8(a2)
 ; RV32-NEXT:    lw a0, 12(a2)
-; RV32-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a0, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    lui a0, 349525
 ; RV32-NEXT:    lui a2, 209715
 ; RV32-NEXT:    lui a3, 61681
 ; RV32-NEXT:    addi t5, a0, 1365
 ; RV32-NEXT:    addi t4, a2, 819
 ; RV32-NEXT:    addi t3, a3, -241
-; RV32-NEXT:    sw a6, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a6, 32(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    slli a0, a6, 31
 ; RV32-NEXT:    srli a2, a5, 1
-; RV32-NEXT:    sw a5, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a5, 24(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    slli a3, a5, 31
 ; RV32-NEXT:    or a6, a2, a0
-; RV32-NEXT:    sw a4, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw a4, 36(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    srli a0, a4, 1
 ; RV32-NEXT:    or a7, a0, a3
 ; RV32-NEXT:    bnez a6, .LBB3_2
@@ -1082,7 +1082,7 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    slli a2, a0, 16
 ; RV32-NEXT:    add a0, a0, a2
 ; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    addi a5, a0, 32
+; RV32-NEXT:    addi a4, a0, 32
 ; RV32-NEXT:    j .LBB3_3
 ; RV32-NEXT:  .LBB3_2:
 ; RV32-NEXT:    srli a0, a6, 1
@@ -1110,152 +1110,151 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a0, a0, a2
 ; RV32-NEXT:    slli a2, a0, 16
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    srli a5, a0, 24
+; RV32-NEXT:    srli a4, a0, 24
 ; RV32-NEXT:  .LBB3_3: # %_udiv-special-cases
-; RV32-NEXT:    lw a4, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    srli a0, a4, 1
-; RV32-NEXT:    slli a3, t2, 31
-; RV32-NEXT:    slli a4, a4, 31
-; RV32-NEXT:    lw a2, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    srli t0, a2, 1
+; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    srli a0, a5, 1
+; RV32-NEXT:    slli a3, t1, 31
+; RV32-NEXT:    slli a5, a5, 31
 ; RV32-NEXT:    lw a2, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    srli t0, a2, 1
+; RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    slli a2, a2, 31
-; RV32-NEXT:    li s2, 64
+; RV32-NEXT:    li s1, 64
 ; RV32-NEXT:    bnez a2, .LBB3_5
 ; RV32-NEXT:  # %bb.4: # %_udiv-special-cases
 ; RV32-NEXT:    li t6, 64
 ; RV32-NEXT:    j .LBB3_6
 ; RV32-NEXT:  .LBB3_5:
-; RV32-NEXT:    srli t1, a2, 1
-; RV32-NEXT:    or t1, a2, t1
-; RV32-NEXT:    srli t6, t1, 2
-; RV32-NEXT:    or t1, t1, t6
-; RV32-NEXT:    srli t6, t1, 4
-; RV32-NEXT:    or t1, t1, t6
-; RV32-NEXT:    srli t6, t1, 8
-; RV32-NEXT:    or t1, t1, t6
-; RV32-NEXT:    srli t6, t1, 16
-; RV32-NEXT:    or t1, t1, t6
-; RV32-NEXT:    not t1, t1
-; RV32-NEXT:    srli t6, t1, 1
+; RV32-NEXT:    srli t2, a2, 1
+; RV32-NEXT:    or t2, a2, t2
+; RV32-NEXT:    srli t6, t2, 2
+; RV32-NEXT:    or t2, t2, t6
+; RV32-NEXT:    srli t6, t2, 4
+; RV32-NEXT:    or t2, t2, t6
+; RV32-NEXT:    srli t6, t2, 8
+; RV32-NEXT:    or t2, t2, t6
+; RV32-NEXT:    srli t6, t2, 16
+; RV32-NEXT:    or t2, t2, t6
+; RV32-NEXT:    not t2, t2
+; RV32-NEXT:    srli t6, t2, 1
 ; RV32-NEXT:    and t6, t6, t5
-; RV32-NEXT:    sub t1, t1, t6
-; RV32-NEXT:    and t6, t1, t4
-; RV32-NEXT:    srli t1, t1, 2
-; RV32-NEXT:    and t1, t1, t4
-; RV32-NEXT:    add t1, t6, t1
-; RV32-NEXT:    srli t6, t1, 4
-; RV32-NEXT:    add t1, t1, t6
-; RV32-NEXT:    and t1, t1, t3
-; RV32-NEXT:    slli t6, t1, 8
-; RV32-NEXT:    add t1, t1, t6
-; RV32-NEXT:    slli t6, t1, 16
-; RV32-NEXT:    add t1, t1, t6
-; RV32-NEXT:    srli t6, t1, 24
+; RV32-NEXT:    sub t2, t2, t6
+; RV32-NEXT:    and t6, t2, t4
+; RV32-NEXT:    srli t2, t2, 2
+; RV32-NEXT:    and t2, t2, t4
+; RV32-NEXT:    add t2, t6, t2
+; RV32-NEXT:    srli t6, t2, 4
+; RV32-NEXT:    add t2, t2, t6
+; RV32-NEXT:    and t2, t2, t3
+; RV32-NEXT:    slli t6, t2, 8
+; RV32-NEXT:    add t2, t2, t6
+; RV32-NEXT:    slli t6, t2, 16
+; RV32-NEXT:    add t2, t2, t6
+; RV32-NEXT:    srli t6, t2, 24
 ; RV32-NEXT:  .LBB3_6: # %_udiv-special-cases
-; RV32-NEXT:    or t1, a3, a0
-; RV32-NEXT:    or t0, t0, a4
+; RV32-NEXT:    or s0, a3, a0
+; RV32-NEXT:    or a5, t0, a5
 ; RV32-NEXT:    bnez a2, .LBB3_8
 ; RV32-NEXT:  # %bb.7: # %_udiv-special-cases
 ; RV32-NEXT:    li t6, 128
 ; RV32-NEXT:  .LBB3_8: # %_udiv-special-cases
-; RV32-NEXT:    or a4, t0, t1
-; RV32-NEXT:    addi a3, a5, 64
+; RV32-NEXT:    or t2, a5, s0
+; RV32-NEXT:    addi t0, a4, 64
+; RV32-NEXT:    or a0, a6, s0
+; RV32-NEXT:    or a3, a7, a5
+; RV32-NEXT:    or a3, a3, a0
 ; RV32-NEXT:    addi a0, t6, 128
-; RV32-NEXT:    or a2, a6, t1
-; RV32-NEXT:    or a6, a7, t0
-; RV32-NEXT:    or s3, a6, a2
-; RV32-NEXT:    sltu s0, a0, t6
-; RV32-NEXT:    bnez s3, .LBB3_11
+; RV32-NEXT:    bnez a3, .LBB3_11
 ; RV32-NEXT:  # %bb.9: # %_udiv-special-cases
-; RV32-NEXT:    mv t6, s0
-; RV32-NEXT:    beqz t1, .LBB3_12
+; RV32-NEXT:    sltu t6, a0, t6
+; RV32-NEXT:    beqz s0, .LBB3_12
 ; RV32-NEXT:  .LBB3_10:
-; RV32-NEXT:    srli a2, t1, 1
-; RV32-NEXT:    or a2, t1, a2
-; RV32-NEXT:    srli a5, a2, 2
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 8
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 16
-; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a2, s0, 1
+; RV32-NEXT:    or a2, s0, a2
+; RV32-NEXT:    srli a4, a2, 2
+; RV32-NEXT:    or a2, a2, a4
+; RV32-NEXT:    srli a4, a2, 4
+; RV32-NEXT:    or a2, a2, a4
+; RV32-NEXT:    srli a4, a2, 8
+; RV32-NEXT:    or a2, a2, a4
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    or a2, a2, a4
 ; RV32-NEXT:    not a2, a2
-; RV32-NEXT:    srli a5, a2, 1
-; RV32-NEXT:    and a5, a5, t5
-; RV32-NEXT:    sub a2, a2, a5
-; RV32-NEXT:    and a5, a2, t4
+; RV32-NEXT:    srli a4, a2, 1
+; RV32-NEXT:    and a4, a4, t5
+; RV32-NEXT:    sub a2, a2, a4
+; RV32-NEXT:    and a4, a2, t4
 ; RV32-NEXT:    srli a2, a2, 2
 ; RV32-NEXT:    and a2, a2, t4
-; RV32-NEXT:    add a2, a5, a2
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    add a2, a4, a2
+; RV32-NEXT:    srli a4, a2, 4
+; RV32-NEXT:    add a2, a2, a4
 ; RV32-NEXT:    and a2, a2, t3
-; RV32-NEXT:    slli a5, a2, 8
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    slli a5, a2, 16
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    srli s1, a2, 24
-; RV32-NEXT:    beqz a4, .LBB3_13
+; RV32-NEXT:    slli a4, a2, 8
+; RV32-NEXT:    add a2, a2, a4
+; RV32-NEXT:    slli a4, a2, 16
+; RV32-NEXT:    add a2, a2, a4
+; RV32-NEXT:    srli s0, a2, 24
+; RV32-NEXT:    beqz t2, .LBB3_13
 ; RV32-NEXT:    j .LBB3_14
 ; RV32-NEXT:  .LBB3_11:
-; RV32-NEXT:    snez a2, a4
-; RV32-NEXT:    sltu a5, a3, a5
+; RV32-NEXT:    snez a2, t2
+; RV32-NEXT:    sltu a4, t0, a4
 ; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and t6, a2, a5
-; RV32-NEXT:    bnez t1, .LBB3_10
+; RV32-NEXT:    and t6, a2, a4
+; RV32-NEXT:    bnez s0, .LBB3_10
 ; RV32-NEXT:  .LBB3_12: # %_udiv-special-cases
-; RV32-NEXT:    srli a2, t0, 1
-; RV32-NEXT:    or a2, t0, a2
-; RV32-NEXT:    srli a5, a2, 2
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 8
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 16
-; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a2, a5, 1
+; RV32-NEXT:    or a2, a5, a2
+; RV32-NEXT:    srli a4, a2, 2
+; RV32-NEXT:    or a2, a2, a4
+; RV32-NEXT:    srli a4, a2, 4
+; RV32-NEXT:    or a2, a2, a4
+; RV32-NEXT:    srli a4, a2, 8
+; RV32-NEXT:    or a2, a2, a4
+; RV32-NEXT:    srli a4, a2, 16
+; RV32-NEXT:    or a2, a2, a4
 ; RV32-NEXT:    not a2, a2
-; RV32-NEXT:    srli a5, a2, 1
-; RV32-NEXT:    and a5, a5, t5
-; RV32-NEXT:    sub a2, a2, a5
-; RV32-NEXT:    and a5, a2, t4
+; RV32-NEXT:    srli a4, a2, 1
+; RV32-NEXT:    and a4, a4, t5
+; RV32-NEXT:    sub a2, a2, a4
+; RV32-NEXT:    and a4, a2, t4
 ; RV32-NEXT:    srli a2, a2, 2
 ; RV32-NEXT:    and a2, a2, t4
-; RV32-NEXT:    add a2, a5, a2
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    add a2, a4, a2
+; RV32-NEXT:    srli a4, a2, 4
+; RV32-NEXT:    add a2, a2, a4
 ; RV32-NEXT:    and a2, a2, t3
-; RV32-NEXT:    slli a5, a2, 8
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    slli a5, a2, 16
-; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    slli a4, a2, 8
+; RV32-NEXT:    add a2, a2, a4
+; RV32-NEXT:    slli a4, a2, 16
+; RV32-NEXT:    add a2, a2, a4
 ; RV32-NEXT:    srli a2, a2, 24
-; RV32-NEXT:    addi s1, a2, 32
-; RV32-NEXT:    bnez a4, .LBB3_14
+; RV32-NEXT:    addi s0, a2, 32
+; RV32-NEXT:    bnez t2, .LBB3_14
 ; RV32-NEXT:  .LBB3_13: # %_udiv-special-cases
-; RV32-NEXT:    mv s1, a3
+; RV32-NEXT:    mv s0, t0
 ; RV32-NEXT:  .LBB3_14: # %_udiv-special-cases
 ; RV32-NEXT:    lw a7, 0(a1)
 ; RV32-NEXT:    lw t0, 4(a1)
 ; RV32-NEXT:    lw a6, 8(a1)
-; RV32-NEXT:    bnez s3, .LBB3_16
+; RV32-NEXT:    bnez a3, .LBB3_16
 ; RV32-NEXT:  # %bb.15: # %_udiv-special-cases
-; RV32-NEXT:    mv s1, a0
+; RV32-NEXT:    mv s0, a0
 ; RV32-NEXT:  .LBB3_16: # %_udiv-special-cases
-; RV32-NEXT:    lw t1, 12(a1)
+; RV32-NEXT:    lw t2, 12(a1)
 ; RV32-NEXT:    lw a1, 16(a1)
 ; RV32-NEXT:    slli a0, a6, 31
 ; RV32-NEXT:    srli a2, t0, 1
-; RV32-NEXT:    or s4, a2, a0
+; RV32-NEXT:    or s2, a2, a0
 ; RV32-NEXT:    slli a0, t0, 31
 ; RV32-NEXT:    srli a2, a7, 1
-; RV32-NEXT:    or s5, a2, a0
-; RV32-NEXT:    bnez s4, .LBB3_18
+; RV32-NEXT:    or s3, a2, a0
+; RV32-NEXT:    bnez s2, .LBB3_18
 ; RV32-NEXT:  # %bb.17: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, s5, 1
-; RV32-NEXT:    or a0, s5, a0
+; RV32-NEXT:    srli a0, s3, 1
+; RV32-NEXT:    or a0, s3, a0
 ; RV32-NEXT:    srli a2, a0, 2
 ; RV32-NEXT:    or a0, a0, a2
 ; RV32-NEXT:    srli a2, a0, 4
@@ -1280,11 +1279,11 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    slli a2, a0, 16
 ; RV32-NEXT:    add a0, a0, a2
 ; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    addi a4, a0, 32
+; RV32-NEXT:    addi a0, a0, 32
 ; RV32-NEXT:    j .LBB3_19
 ; RV32-NEXT:  .LBB3_18:
-; RV32-NEXT:    srli a0, s4, 1
-; RV32-NEXT:    or a0, s4, a0
+; RV32-NEXT:    srli a0, s2, 1
+; RV32-NEXT:    or a0, s2, a0
 ; RV32-NEXT:    srli a2, a0, 2
 ; RV32-NEXT:    or a0, a0, a2
 ; RV32-NEXT:    srli a2, a0, 4
@@ -1308,215 +1307,193 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a0, a0, a2
 ; RV32-NEXT:    slli a2, a0, 16
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    srli a4, a0, 24
+; RV32-NEXT:    srli a0, a0, 24
 ; RV32-NEXT:  .LBB3_19: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, t1, 1
-; RV32-NEXT:    slli a2, a1, 31
-; RV32-NEXT:    slli a3, t1, 31
+; RV32-NEXT:    srli a2, t2, 1
+; RV32-NEXT:    slli a3, a1, 31
+; RV32-NEXT:    slli a4, t2, 31
 ; RV32-NEXT:    slli a5, a7, 31
-; RV32-NEXT:    srli s6, a6, 1
+; RV32-NEXT:    srli s4, a6, 1
 ; RV32-NEXT:    beqz a5, .LBB3_21
 ; RV32-NEXT:  # %bb.20:
-; RV32-NEXT:    srli s2, a5, 1
-; RV32-NEXT:    or s2, a5, s2
-; RV32-NEXT:    srli s7, s2, 2
-; RV32-NEXT:    or s2, s2, s7
-; RV32-NEXT:    srli s7, s2, 4
-; RV32-NEXT:    or s2, s2, s7
-; RV32-NEXT:    srli s7, s2, 8
-; RV32-NEXT:    or s2, s2, s7
-; RV32-NEXT:    srli s7, s2, 16
-; RV32-NEXT:    or s2, s2, s7
-; RV32-NEXT:    not s2, s2
-; RV32-NEXT:    srli s7, s2, 1
-; RV32-NEXT:    and s7, s7, t5
-; RV32-NEXT:    sub s2, s2, s7
-; RV32-NEXT:    and s7, s2, t4
-; RV32-NEXT:    srli s2, s2, 2
-; RV32-NEXT:    and s2, s2, t4
-; RV32-NEXT:    add s2, s7, s2
-; RV32-NEXT:    srli s7, s2, 4
-; RV32-NEXT:    add s2, s2, s7
-; RV32-NEXT:    and s2, s2, t3
-; RV32-NEXT:    slli s7, s2, 8
-; RV32-NEXT:    add s2, s2, s7
-; RV32-NEXT:    slli s7, s2, 16
-; RV32-NEXT:    add s2, s2, s7
-; RV32-NEXT:    srli s2, s2, 24
+; RV32-NEXT:    srli s1, a5, 1
+; RV32-NEXT:    or s1, a5, s1
+; RV32-NEXT:    srli s5, s1, 2
+; RV32-NEXT:    or s1, s1, s5
+; RV32-NEXT:    srli s5, s1, 4
+; RV32-NEXT:    or s1, s1, s5
+; RV32-NEXT:    srli s5, s1, 8
+; RV32-NEXT:    or s1, s1, s5
+; RV32-NEXT:    srli s5, s1, 16
+; RV32-NEXT:    or s1, s1, s5
+; RV32-NEXT:    not s1, s1
+; RV32-NEXT:    srli s5, s1, 1
+; RV32-NEXT:    and s5, s5, t5
+; RV32-NEXT:    sub s1, s1, s5
+; RV32-NEXT:    and s5, s1, t4
+; RV32-NEXT:    srli s1, s1, 2
+; RV32-NEXT:    and s1, s1, t4
+; RV32-NEXT:    add s1, s5, s1
+; RV32-NEXT:    srli s5, s1, 4
+; RV32-NEXT:    add s1, s1, s5
+; RV32-NEXT:    and s1, s1, t3
+; RV32-NEXT:    slli s5, s1, 8
+; RV32-NEXT:    add s1, s1, s5
+; RV32-NEXT:    slli s5, s1, 16
+; RV32-NEXT:    add s1, s1, s5
+; RV32-NEXT:    srli s1, s1, 24
 ; RV32-NEXT:  .LBB3_21: # %_udiv-special-cases
-; RV32-NEXT:    or a2, a2, a0
-; RV32-NEXT:    or s7, s6, a3
+; RV32-NEXT:    or a2, a3, a2
+; RV32-NEXT:    or a4, s4, a4
 ; RV32-NEXT:    bnez a5, .LBB3_23
 ; RV32-NEXT:  # %bb.22: # %_udiv-special-cases
-; RV32-NEXT:    li s2, 128
+; RV32-NEXT:    li s1, 128
 ; RV32-NEXT:  .LBB3_23: # %_udiv-special-cases
-; RV32-NEXT:    or a3, s7, a2
-; RV32-NEXT:    addi a0, a4, 64
-; RV32-NEXT:    addi s6, s2, 128
-; RV32-NEXT:    or a5, s4, a2
-; RV32-NEXT:    or s4, s5, s7
-; RV32-NEXT:    or s5, s4, a5
-; RV32-NEXT:    sltu s4, s6, s2
-; RV32-NEXT:    bnez s5, .LBB3_26
+; RV32-NEXT:    or s4, a4, a2
+; RV32-NEXT:    addi a3, a0, 64
+; RV32-NEXT:    or a5, s2, a2
+; RV32-NEXT:    or s2, s3, a4
+; RV32-NEXT:    or s3, s2, a5
+; RV32-NEXT:    addi s2, s1, 128
+; RV32-NEXT:    bnez s3, .LBB3_26
 ; RV32-NEXT:  # %bb.24: # %_udiv-special-cases
-; RV32-NEXT:    mv s2, s4
-; RV32-NEXT:    snez a5, s3
+; RV32-NEXT:    sltu s1, s2, s1
 ; RV32-NEXT:    beqz a2, .LBB3_27
 ; RV32-NEXT:  .LBB3_25:
-; RV32-NEXT:    srli a4, a2, 1
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    srli a4, a2, 2
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    srli a4, a2, 4
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    srli a4, a2, 8
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    not a2, a2
-; RV32-NEXT:    srli a4, a2, 1
-; RV32-NEXT:    and a4, a4, t5
-; RV32-NEXT:    sub a2, a2, a4
-; RV32-NEXT:    and a4, a2, t4
-; RV32-NEXT:    srli a2, a2, 2
-; RV32-NEXT:    and a2, a2, t4
-; RV32-NEXT:    add a2, a4, a2
-; RV32-NEXT:    srli a4, a2, 4
-; RV32-NEXT:    add a2, a2, a4
-; RV32-NEXT:    and a2, a2, t3
-; RV32-NEXT:    slli a4, a2, 8
-; RV32-NEXT:    add a2, a2, a4
-; RV32-NEXT:    slli a4, a2, 16
-; RV32-NEXT:    add a2, a2, a4
-; RV32-NEXT:    srli a4, a2, 24
+; RV32-NEXT:    srli a0, a2, 1
+; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    srli a2, a0, 2
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 4
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 8
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 16
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    srli a2, a0, 1
+; RV32-NEXT:    and a2, a2, t5
+; RV32-NEXT:    sub a0, a0, a2
+; RV32-NEXT:    and a2, a0, t4
+; RV32-NEXT:    srli a0, a0, 2
+; RV32-NEXT:    and a0, a0, t4
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    srli a2, a0, 4
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    and a0, a0, t3
+; RV32-NEXT:    slli a2, a0, 8
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    slli a2, a0, 16
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    srli a4, a0, 24
 ; RV32-NEXT:    j .LBB3_28
 ; RV32-NEXT:  .LBB3_26:
-; RV32-NEXT:    snez a5, a3
-; RV32-NEXT:    sltu a4, a0, a4
+; RV32-NEXT:    snez a5, s4
+; RV32-NEXT:    sltu a0, a3, a0
 ; RV32-NEXT:    addi a5, a5, -1
-; RV32-NEXT:    and s2, a5, a4
-; RV32-NEXT:    snez a5, s3
+; RV32-NEXT:    and s1, a5, a0
 ; RV32-NEXT:    bnez a2, .LBB3_25
 ; RV32-NEXT:  .LBB3_27: # %_udiv-special-cases
-; RV32-NEXT:    srli a2, s7, 1
-; RV32-NEXT:    or a2, s7, a2
-; RV32-NEXT:    srli a4, a2, 2
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    srli a4, a2, 4
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    srli a4, a2, 8
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    srli a4, a2, 16
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    not a2, a2
-; RV32-NEXT:    srli a4, a2, 1
-; RV32-NEXT:    and a4, a4, t5
-; RV32-NEXT:    sub a2, a2, a4
-; RV32-NEXT:    and a4, a2, t4
-; RV32-NEXT:    srli a2, a2, 2
-; RV32-NEXT:    and a2, a2, t4
-; RV32-NEXT:    add a2, a4, a2
-; RV32-NEXT:    srli a4, a2, 4
-; RV32-NEXT:    add a2, a2, a4
-; RV32-NEXT:    and a2, a2, t3
-; RV32-NEXT:    slli a4, a2, 8
-; RV32-NEXT:    add a2, a2, a4
-; RV32-NEXT:    slli a4, a2, 16
-; RV32-NEXT:    add a2, a2, a4
-; RV32-NEXT:    srli a2, a2, 24
-; RV32-NEXT:    addi a4, a2, 32
+; RV32-NEXT:    srli a0, a4, 1
+; RV32-NEXT:    or a0, a4, a0
+; RV32-NEXT:    srli a2, a0, 2
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 4
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 8
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 16
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    srli a2, a0, 1
+; RV32-NEXT:    and a2, a2, t5
+; RV32-NEXT:    sub a0, a0, a2
+; RV32-NEXT:    and a2, a0, t4
+; RV32-NEXT:    srli a0, a0, 2
+; RV32-NEXT:    and a0, a0, t4
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    srli a2, a0, 4
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    and a0, a0, t3
+; RV32-NEXT:    slli a2, a0, 8
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    slli a2, a0, 16
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    srli a0, a0, 24
+; RV32-NEXT:    addi a4, a0, 32
 ; RV32-NEXT:  .LBB3_28: # %_udiv-special-cases
-; RV32-NEXT:    xori a2, s0, 1
-; RV32-NEXT:    addi a5, a5, -1
-; RV32-NEXT:    bnez a3, .LBB3_30
+; RV32-NEXT:    andi s11, a1, 1
+; RV32-NEXT:    andi a0, t1, 1
+; RV32-NEXT:    lw a1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a2, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or s9, a1, a2
+; RV32-NEXT:    or a1, a7, a6
+; RV32-NEXT:    bnez s4, .LBB3_30
 ; RV32-NEXT:  # %bb.29: # %_udiv-special-cases
-; RV32-NEXT:    mv a4, a0
+; RV32-NEXT:    mv a4, a3
 ; RV32-NEXT:  .LBB3_30: # %_udiv-special-cases
-; RV32-NEXT:    andi s11, a1, 1
-; RV32-NEXT:    andi a0, t2, 1
-; RV32-NEXT:    lw a1, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a2, 24(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw a3, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or s9, a1, a3
-; RV32-NEXT:    or a3, a7, a6
-; RV32-NEXT:    neg a1, a2
-; RV32-NEXT:    and s0, a5, s0
-; RV32-NEXT:    bnez s5, .LBB3_32
+; RV32-NEXT:    or s10, a2, a3
+; RV32-NEXT:    or a3, s9, a0
+; RV32-NEXT:    or a2, t0, t2
+; RV32-NEXT:    or a5, a1, s11
+; RV32-NEXT:    bnez s3, .LBB3_32
 ; RV32-NEXT:  # %bb.31: # %_udiv-special-cases
-; RV32-NEXT:    mv a4, s6
+; RV32-NEXT:    mv a4, s2
 ; RV32-NEXT:  .LBB3_32: # %_udiv-special-cases
-; RV32-NEXT:    lw a2, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or s10, a2, a5
-; RV32-NEXT:    or a2, s9, a0
-; RV32-NEXT:    or a5, t0, t1
-; RV32-NEXT:    or t4, a3, s11
-; RV32-NEXT:    and a1, s0, a1
-; RV32-NEXT:    xori a3, s4, 1
-; RV32-NEXT:    snez t2, s5
-; RV32-NEXT:    neg a3, a3
-; RV32-NEXT:    addi t2, t2, -1
-; RV32-NEXT:    and t3, t2, s4
-; RV32-NEXT:    sltu t2, s1, a4
-; RV32-NEXT:    and t3, t3, a3
-; RV32-NEXT:    mv a3, t2
-; RV32-NEXT:    beq t6, s2, .LBB3_34
+; RV32-NEXT:    or a1, a3, s10
+; RV32-NEXT:    sltu a3, s0, a4
+; RV32-NEXT:    or a2, a5, a2
+; RV32-NEXT:    mv a5, a3
+; RV32-NEXT:    beq t6, s1, .LBB3_34
 ; RV32-NEXT:  # %bb.33: # %_udiv-special-cases
-; RV32-NEXT:    sltu a3, t6, s2
+; RV32-NEXT:    sltu a5, t6, s1
 ; RV32-NEXT:  .LBB3_34: # %_udiv-special-cases
-; RV32-NEXT:    or a2, a2, s10
-; RV32-NEXT:    or a5, t4, a5
-; RV32-NEXT:    sltu t5, a1, t3
-; RV32-NEXT:    mv t4, a3
-; RV32-NEXT:    beq a1, t3, .LBB3_36
-; RV32-NEXT:  # %bb.35: # %_udiv-special-cases
-; RV32-NEXT:    mv t4, t5
-; RV32-NEXT:  .LBB3_36: # %_udiv-special-cases
+; RV32-NEXT:    seqz a1, a1
 ; RV32-NEXT:    seqz a2, a2
-; RV32-NEXT:    seqz a5, a5
-; RV32-NEXT:    andi t4, t4, 1
-; RV32-NEXT:    sub t6, t6, s2
-; RV32-NEXT:    sub a1, a1, t3
-; RV32-NEXT:    sub t2, t6, t2
-; RV32-NEXT:    sltu t3, a1, a3
-; RV32-NEXT:    add t3, t5, t3
-; RV32-NEXT:    neg t3, t3
-; RV32-NEXT:    sub t5, a1, a3
-; RV32-NEXT:    or a1, t5, t3
-; RV32-NEXT:    sub a3, s1, a4
-; RV32-NEXT:    beqz a1, .LBB3_38
-; RV32-NEXT:  # %bb.37: # %_udiv-special-cases
-; RV32-NEXT:    snez a1, a1
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    bnez t4, .LBB3_39
-; RV32-NEXT:    j .LBB3_40
-; RV32-NEXT:  .LBB3_38:
-; RV32-NEXT:    snez a1, t2
-; RV32-NEXT:    sltiu a4, a3, 129
+; RV32-NEXT:    andi t4, a5, 1
+; RV32-NEXT:    sub t1, t6, s1
+; RV32-NEXT:    neg t3, a5
+; RV32-NEXT:    snez a5, a5
+; RV32-NEXT:    sub t1, t1, a3
+; RV32-NEXT:    neg t5, a5
+; RV32-NEXT:    or a3, t3, t5
+; RV32-NEXT:    sub t6, s0, a4
+; RV32-NEXT:    beqz a3, .LBB3_36
+; RV32-NEXT:  # %bb.35: # %_udiv-special-cases
+; RV32-NEXT:    snez a3, a3
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    bnez t4, .LBB3_37
+; RV32-NEXT:    j .LBB3_38
+; RV32-NEXT:  .LBB3_36:
+; RV32-NEXT:    snez a3, t1
+; RV32-NEXT:    sltiu a4, t6, 129
 ; RV32-NEXT:    xori a4, a4, 1
-; RV32-NEXT:    or a1, a4, a1
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    beqz t4, .LBB3_40
-; RV32-NEXT:  .LBB3_39: # %_udiv-special-cases
-; RV32-NEXT:    mv a1, t4
-; RV32-NEXT:  .LBB3_40: # %_udiv-special-cases
-; RV32-NEXT:    or t6, a2, a1
-; RV32-NEXT:    addi a5, t6, -1
-; RV32-NEXT:    and s0, s11, a5
-; RV32-NEXT:    and a4, a5, t1
-; RV32-NEXT:    and a2, a5, a6
-; RV32-NEXT:    and a1, a5, t0
-; RV32-NEXT:    and a5, a5, a7
-; RV32-NEXT:    bnez t6, .LBB3_57
-; RV32-NEXT:  # %bb.41: # %_udiv-special-cases
-; RV32-NEXT:    or t6, t2, t3
-; RV32-NEXT:    xori s1, a3, 128
+; RV32-NEXT:    or a3, a4, a3
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    beqz t4, .LBB3_38
+; RV32-NEXT:  .LBB3_37: # %_udiv-special-cases
+; RV32-NEXT:    mv a3, t4
+; RV32-NEXT:  .LBB3_38: # %_udiv-special-cases
+; RV32-NEXT:    or a5, a1, a3
+; RV32-NEXT:    addi a4, a5, -1
+; RV32-NEXT:    and s0, s11, a4
+; RV32-NEXT:    and a3, a4, t2
+; RV32-NEXT:    and a2, a4, a6
+; RV32-NEXT:    and a1, a4, t0
+; RV32-NEXT:    and a4, a4, a7
+; RV32-NEXT:    bnez a5, .LBB3_55
+; RV32-NEXT:  # %bb.39: # %_udiv-special-cases
+; RV32-NEXT:    or a5, t1, t5
+; RV32-NEXT:    xori s1, t6, 128
 ; RV32-NEXT:    or s1, s1, t4
-; RV32-NEXT:    or s1, s1, t5
-; RV32-NEXT:    or t6, s1, t6
-; RV32-NEXT:    beqz t6, .LBB3_57
-; RV32-NEXT:  # %bb.42: # %udiv-bb1
-; RV32-NEXT:    sw s8, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi a1, a3, 1
+; RV32-NEXT:    or s1, s1, t3
+; RV32-NEXT:    or a5, s1, a5
+; RV32-NEXT:    beqz a5, .LBB3_55
+; RV32-NEXT:  # %bb.40: # %udiv-bb1
+; RV32-NEXT:    sw s6, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi a1, t6, 1
 ; RV32-NEXT:    sw zero, 136(sp)
 ; RV32-NEXT:    sw zero, 140(sp)
 ; RV32-NEXT:    sw zero, 144(sp)
@@ -1528,30 +1505,30 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    sw a7, 152(sp)
 ; RV32-NEXT:    sw t0, 156(sp)
 ; RV32-NEXT:    sw a6, 160(sp)
-; RV32-NEXT:    sw t1, 164(sp)
+; RV32-NEXT:    sw t2, 164(sp)
 ; RV32-NEXT:    sw s11, 168(sp)
 ; RV32-NEXT:    li a2, 128
-; RV32-NEXT:    addi a4, sp, 152
-; RV32-NEXT:    neg ra, a3
-; RV32-NEXT:    seqz a5, a1
-; RV32-NEXT:    sub a2, a2, a3
-; RV32-NEXT:    add t2, t2, a5
-; RV32-NEXT:    andi a3, a2, 31
+; RV32-NEXT:    addi a3, sp, 152
+; RV32-NEXT:    neg ra, t6
+; RV32-NEXT:    seqz a4, a1
+; RV32-NEXT:    sub a2, a2, t6
+; RV32-NEXT:    add t1, t1, a4
+; RV32-NEXT:    andi a4, a2, 31
 ; RV32-NEXT:    srli a2, a2, 3
-; RV32-NEXT:    or a5, a1, t2
-; RV32-NEXT:    xori s8, a3, 31
+; RV32-NEXT:    or a5, a1, t1
+; RV32-NEXT:    xori s8, a4, 31
 ; RV32-NEXT:    andi a2, a2, 28
 ; RV32-NEXT:    seqz t6, a5
-; RV32-NEXT:    sub a3, a4, a2
-; RV32-NEXT:    add t6, t5, t6
+; RV32-NEXT:    sub a3, a3, a2
+; RV32-NEXT:    add t6, t3, t6
 ; RV32-NEXT:    lw a2, 0(a3)
 ; RV32-NEXT:    lw a5, 4(a3)
 ; RV32-NEXT:    lw s1, 8(a3)
 ; RV32-NEXT:    lw a4, 12(a3)
-; RV32-NEXT:    sltu t5, t6, t5
+; RV32-NEXT:    sltu t3, t6, t3
 ; RV32-NEXT:    or s0, a1, t6
-; RV32-NEXT:    add t3, t3, t5
-; RV32-NEXT:    or t5, t2, t3
+; RV32-NEXT:    add t3, t5, t3
+; RV32-NEXT:    or t5, t1, t3
 ; RV32-NEXT:    or t5, s0, t5
 ; RV32-NEXT:    srli s0, s1, 1
 ; RV32-NEXT:    seqz s2, t5
@@ -1570,12 +1547,12 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    or s3, a5, s3
 ; RV32-NEXT:    or a5, t5, s1
 ; RV32-NEXT:    sll t5, a2, ra
-; RV32-NEXT:    beqz a5, .LBB3_55
-; RV32-NEXT:  # %bb.43: # %udiv-preheader
+; RV32-NEXT:    beqz a5, .LBB3_53
+; RV32-NEXT:  # %bb.41: # %udiv-preheader
 ; RV32-NEXT:    sw zero, 52(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw zero, 48(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw zero, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw zero, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s6, 0
 ; RV32-NEXT:    li s7, 0
 ; RV32-NEXT:    srli a4, a4, 1
 ; RV32-NEXT:    lw a3, 16(a3)
@@ -1594,7 +1571,7 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    sw a7, 56(sp)
 ; RV32-NEXT:    sw t0, 60(sp)
 ; RV32-NEXT:    sw a6, 64(sp)
-; RV32-NEXT:    sw t1, 68(sp)
+; RV32-NEXT:    sw t2, 68(sp)
 ; RV32-NEXT:    srli a2, a1, 3
 ; RV32-NEXT:    addi a5, sp, 56
 ; RV32-NEXT:    andi a6, a1, 31
@@ -1608,13 +1585,13 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    lw a5, 16(a2)
 ; RV32-NEXT:    lw a7, 0(a2)
 ; RV32-NEXT:    lw t0, 4(a2)
-; RV32-NEXT:    lw t1, 8(a2)
+; RV32-NEXT:    lw t2, 8(a2)
 ; RV32-NEXT:    lw a2, 12(a2)
 ; RV32-NEXT:    sll a3, a3, ra
 ; RV32-NEXT:    or a3, a3, a4
 ; RV32-NEXT:    slli a5, a5, 1
 ; RV32-NEXT:    slli a4, a2, 1
-; RV32-NEXT:    slli t4, t1, 1
+; RV32-NEXT:    slli t4, t2, 1
 ; RV32-NEXT:    slli s4, t0, 1
 ; RV32-NEXT:    sll a5, a5, a6
 ; RV32-NEXT:    sll a4, a4, a6
@@ -1622,56 +1599,57 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    sll a6, s4, a6
 ; RV32-NEXT:    srl a2, a2, a1
 ; RV32-NEXT:    or s9, a2, a5
-; RV32-NEXT:    lw s4, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s4, 36(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    seqz a2, s4
-; RV32-NEXT:    srl a5, t1, a1
+; RV32-NEXT:    srl a5, t2, a1
 ; RV32-NEXT:    or ra, a5, a4
-; RV32-NEXT:    lw a5, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    or a4, s4, a5
-; RV32-NEXT:    sub s5, a5, a2
+; RV32-NEXT:    sub a5, a5, a2
 ; RV32-NEXT:    seqz a2, a4
 ; RV32-NEXT:    srl a4, t0, a1
 ; RV32-NEXT:    or s11, a4, t4
-; RV32-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sub a5, a4, a2
-; RV32-NEXT:    sw a5, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub t0, a4, a2
+; RV32-NEXT:    sw t0, 40(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sltu a2, a4, a2
 ; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    lw a4, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sub s6, a4, a2
+; RV32-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub s5, a4, a2
 ; RV32-NEXT:    andi a0, a0, 1
 ; RV32-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    andi a0, a3, 1
 ; RV32-NEXT:    srl a2, a7, a1
-; RV32-NEXT:    or a3, a2, a6
+; RV32-NEXT:    or s8, a2, a6
 ; RV32-NEXT:    addi s4, s4, -1
 ; RV32-NEXT:    sw s4, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    j .LBB3_45
-; RV32-NEXT:  .LBB3_44: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    lw a6, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    and t1, a0, a6
-; RV32-NEXT:    xor a6, a4, s8
-; RV32-NEXT:    xor a7, ra, t1
-; RV32-NEXT:    or a6, a7, a6
-; RV32-NEXT:    srli a6, s2, 31
-; RV32-NEXT:    sltu a7, ra, t1
-; RV32-NEXT:    sub t1, ra, t1
+; RV32-NEXT:    sw a5, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    j .LBB3_43
+; RV32-NEXT:  .LBB3_42: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_43 Depth=1
+; RV32-NEXT:    lw a2, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and t2, a0, a2
+; RV32-NEXT:    xor a2, a3, a6
+; RV32-NEXT:    xor a7, ra, t2
+; RV32-NEXT:    or a2, a7, a2
+; RV32-NEXT:    srli a2, s2, 31
+; RV32-NEXT:    sltu a7, ra, t2
+; RV32-NEXT:    sub t2, ra, t2
 ; RV32-NEXT:    slli ra, s0, 1
-; RV32-NEXT:    sub a4, a4, s8
-; RV32-NEXT:    srli s8, s3, 31
+; RV32-NEXT:    sub a3, a3, a6
+; RV32-NEXT:    srli a6, s3, 31
 ; RV32-NEXT:    slli s2, s2, 1
-; RV32-NEXT:    sub a2, s11, a2
+; RV32-NEXT:    sub a5, s11, a5
 ; RV32-NEXT:    srli s11, t5, 31
 ; RV32-NEXT:    slli s3, s3, 1
 ; RV32-NEXT:    srli s0, s0, 31
 ; RV32-NEXT:    slli t5, t5, 1
-; RV32-NEXT:    or a6, ra, a6
+; RV32-NEXT:    or a2, ra, a2
 ; RV32-NEXT:    or t0, a1, t6
-; RV32-NEXT:    or s2, s2, s8
-; RV32-NEXT:    or s8, t2, t3
+; RV32-NEXT:    or a6, s2, a6
+; RV32-NEXT:    or s2, t1, t3
 ; RV32-NEXT:    or s3, s3, s11
-; RV32-NEXT:    or t4, a1, t2
+; RV32-NEXT:    or t4, a1, t1
 ; RV32-NEXT:    lw s4, 52(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    or t5, s4, t5
 ; RV32-NEXT:    seqz s4, a1
@@ -1679,104 +1657,104 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    or s7, s7, s0
 ; RV32-NEXT:    andi a0, a0, 1
 ; RV32-NEXT:    sw a0, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sub ra, t1, s9
-; RV32-NEXT:    sltu t1, t1, s9
-; RV32-NEXT:    sub a4, a4, a7
-; RV32-NEXT:    sub s11, a2, a5
-; RV32-NEXT:    or a2, t0, s8
+; RV32-NEXT:    sub ra, t2, s9
+; RV32-NEXT:    sltu t2, t2, s9
+; RV32-NEXT:    sub a3, a3, a7
+; RV32-NEXT:    sub s11, a5, a4
+; RV32-NEXT:    or a4, t0, s2
 ; RV32-NEXT:    seqz a5, t4
-; RV32-NEXT:    sub t2, t2, s4
+; RV32-NEXT:    sub t1, t1, s4
 ; RV32-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    or s3, a0, s3
 ; RV32-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or s2, a0, s2
-; RV32-NEXT:    lw a0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or s0, a0, a6
+; RV32-NEXT:    or s2, a0, a6
+; RV32-NEXT:    or s0, s6, a2
 ; RV32-NEXT:    andi a0, s7, 1
-; RV32-NEXT:    sub s9, a4, t1
-; RV32-NEXT:    snez a2, a2
-; RV32-NEXT:    sltu a4, t6, a5
+; RV32-NEXT:    sub s9, a3, t2
+; RV32-NEXT:    snez a2, a4
+; RV32-NEXT:    sltu a3, t6, a5
 ; RV32-NEXT:    sub t6, t6, a5
 ; RV32-NEXT:    add a2, s1, a2
-; RV32-NEXT:    sub t3, t3, a4
-; RV32-NEXT:    or a4, a1, t6
+; RV32-NEXT:    sub t3, t3, a3
+; RV32-NEXT:    or a3, a1, t6
 ; RV32-NEXT:    not s1, a2
-; RV32-NEXT:    or a2, t2, t3
+; RV32-NEXT:    or a2, t1, t3
 ; RV32-NEXT:    andi s1, s1, 1
-; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    or a2, a3, a2
 ; RV32-NEXT:    or a2, a2, s1
-; RV32-NEXT:    sub a3, s10, a3
+; RV32-NEXT:    sub s8, s10, s8
 ; RV32-NEXT:    sw zero, 48(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw zero, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw zero, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s6, 0
 ; RV32-NEXT:    li s7, 0
-; RV32-NEXT:    beqz a2, .LBB3_56
-; RV32-NEXT:  .LBB3_45: # %udiv-do-while
+; RV32-NEXT:    lw a5, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    beqz a2, .LBB3_54
+; RV32-NEXT:  .LBB3_43: # %udiv-do-while
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NEXT:    srli a2, ra, 31
-; RV32-NEXT:    slli a4, s9, 1
-; RV32-NEXT:    or a4, a4, a2
+; RV32-NEXT:    slli a3, s9, 1
+; RV32-NEXT:    or a3, a3, a2
 ; RV32-NEXT:    srli a2, s11, 31
 ; RV32-NEXT:    slli ra, ra, 1
 ; RV32-NEXT:    or ra, ra, a2
-; RV32-NEXT:    beq s6, a4, .LBB3_47
-; RV32-NEXT:  # %bb.46: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    sltu a2, s6, a4
-; RV32-NEXT:    j .LBB3_48
-; RV32-NEXT:  .LBB3_47: # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    beq s5, a3, .LBB3_45
+; RV32-NEXT:  # %bb.44: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_43 Depth=1
+; RV32-NEXT:    sltu a2, s5, a3
+; RV32-NEXT:    j .LBB3_46
+; RV32-NEXT:  .LBB3_45: # in Loop: Header=BB3_43 Depth=1
+; RV32-NEXT:    lw a2, 40(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    sltu a2, a2, ra
-; RV32-NEXT:  .LBB3_48: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    srli a5, a3, 31
+; RV32-NEXT:  .LBB3_46: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_43 Depth=1
+; RV32-NEXT:    srli a4, s8, 31
 ; RV32-NEXT:    slli s11, s11, 1
-; RV32-NEXT:    slli a3, a3, 1
-; RV32-NEXT:    or s11, s11, a5
+; RV32-NEXT:    slli s8, s8, 1
+; RV32-NEXT:    or s11, s11, a4
 ; RV32-NEXT:    andi a0, a0, 1
-; RV32-NEXT:    or s10, a3, a0
-; RV32-NEXT:    beq s5, s11, .LBB3_50
-; RV32-NEXT:  # %bb.49: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    sltu a0, s5, s11
-; RV32-NEXT:    j .LBB3_51
-; RV32-NEXT:  .LBB3_50: # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    or s10, s8, a0
+; RV32-NEXT:    beq a5, s11, .LBB3_48
+; RV32-NEXT:  # %bb.47: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_43 Depth=1
+; RV32-NEXT:    sltu a0, a5, s11
+; RV32-NEXT:    j .LBB3_49
+; RV32-NEXT:  .LBB3_48: # in Loop: Header=BB3_43 Depth=1
 ; RV32-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    sltu a0, a0, s10
-; RV32-NEXT:  .LBB3_51: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    lw a3, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT:    xor a3, a3, ra
-; RV32-NEXT:    xor a5, s6, a4
-; RV32-NEXT:    or a3, a3, a5
-; RV32-NEXT:    beqz a3, .LBB3_53
-; RV32-NEXT:  # %bb.52: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:  .LBB3_49: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_43 Depth=1
+; RV32-NEXT:    lw a4, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT:    xor a4, a4, ra
+; RV32-NEXT:    xor a5, s5, a3
+; RV32-NEXT:    or a4, a4, a5
+; RV32-NEXT:    beqz a4, .LBB3_51
+; RV32-NEXT:  # %bb.50: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_43 Depth=1
 ; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:  .LBB3_53: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:  .LBB3_51: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_43 Depth=1
 ; RV32-NEXT:    srli a2, s9, 31
-; RV32-NEXT:    lw a3, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sub a2, a3, a2
+; RV32-NEXT:    lw a4, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub a2, a4, a2
 ; RV32-NEXT:    sub a2, a2, a0
 ; RV32-NEXT:    slli a0, a2, 31
 ; RV32-NEXT:    srai a0, a0, 31
-; RV32-NEXT:    lw a2, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a2, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and a6, a0, a2
+; RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    and s8, a0, a2
-; RV32-NEXT:    lw a3, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT:    and a3, a0, a3
-; RV32-NEXT:    lw a2, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT:    and a2, a0, a2
-; RV32-NEXT:    sltu a5, s10, a3
-; RV32-NEXT:    mv s9, a5
-; RV32-NEXT:    beq s11, a2, .LBB3_44
-; RV32-NEXT:  # %bb.54: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    sltu s9, s11, a2
-; RV32-NEXT:    j .LBB3_44
-; RV32-NEXT:  .LBB3_55:
+; RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and a5, a0, a5
+; RV32-NEXT:    sltu a4, s10, s8
+; RV32-NEXT:    mv s9, a4
+; RV32-NEXT:    beq s11, a5, .LBB3_42
+; RV32-NEXT:  # %bb.52: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_43 Depth=1
+; RV32-NEXT:    sltu s9, s11, a5
+; RV32-NEXT:    j .LBB3_42
+; RV32-NEXT:  .LBB3_53:
 ; RV32-NEXT:    sw zero, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT:  .LBB3_56: # %udiv-loop-exit
+; RV32-NEXT:  .LBB3_54: # %udiv-loop-exit
 ; RV32-NEXT:    srli a0, t5, 31
 ; RV32-NEXT:    slli a1, s3, 1
 ; RV32-NEXT:    srli a2, s3, 31
@@ -1784,19 +1762,19 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    slli a0, s2, 1
 ; RV32-NEXT:    srli a3, s2, 31
 ; RV32-NEXT:    or a2, a0, a2
-; RV32-NEXT:    slli a4, s0, 1
+; RV32-NEXT:    slli a0, s0, 1
 ; RV32-NEXT:    srli s0, s0, 31
 ; RV32-NEXT:    slli t5, t5, 1
-; RV32-NEXT:    or a4, a4, a3
+; RV32-NEXT:    or a3, a0, a3
 ; RV32-NEXT:    lw a0, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or a5, a0, t5
-; RV32-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT:  .LBB3_57: # %udiv-end
-; RV32-NEXT:    sw a5, 0(s8)
-; RV32-NEXT:    sw a1, 4(s8)
-; RV32-NEXT:    sw a2, 8(s8)
-; RV32-NEXT:    sw a4, 12(s8)
-; RV32-NEXT:    sb s0, 16(s8)
+; RV32-NEXT:    or a4, a0, t5
+; RV32-NEXT:    lw s6, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:  .LBB3_55: # %udiv-end
+; RV32-NEXT:    sw a4, 0(s6)
+; RV32-NEXT:    sw a1, 4(s6)
+; RV32-NEXT:    sw a2, 8(s6)
+; RV32-NEXT:    sw a3, 12(s6)
+; RV32-NEXT:    sb s0, 16(s6)
 ; RV32-NEXT:    lw ra, 236(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 232(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 228(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index e420d49cab81e..9d039798177f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -7,10 +7,10 @@
 ; RUN:     --check-prefixes=CHECK,CHECK64,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
 ; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN:     --check-prefixes=CHECK,CHECK32,ZVFHMIN
+; RUN:     --check-prefixes=CHECK,CHECK32,ZVFHMIN,ZVFHMIN32
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
 ; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN:     --check-prefixes=CHECK,CHECK64,ZVFHMIN
+; RUN:     --check-prefixes=CHECK,CHECK64,ZVFHMIN,ZVFHMIN64
 
 define <vscale x 1 x i1> @fcmp_oeq_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: fcmp_oeq_vv_nxv1bf16:
@@ -1337,211 +1337,404 @@ define <vscale x 8 x i1> @fcmp_uno_vf_swap_nxv8bf16(<vscale x 8 x bfloat> %va, b
 }
 
 define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vscale x 64 x bfloat> %vb, <vscale x 64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: fcmp_oeq_vv_nxv64bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    add a3, a3, a1
-; CHECK-NEXT:    slli a1, a1, 2
-; CHECK-NEXT:    add a3, a3, a1
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    add a1, a1, a3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:    slli a1, a1, 2
-; CHECK-NEXT:    add a3, a3, a1
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    add a1, a1, a3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
-; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv8r.v v0, v16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, a1, a3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    csrr a3, vlenb
-; CHECK-NEXT:    slli a1, a3, 3
-; CHECK-NEXT:    slli a5, a3, 2
-; CHECK-NEXT:    slli a4, a3, 1
-; CHECK-NEXT:    add a1, a0, a1
-; CHECK-NEXT:    sub a6, a2, a5
-; CHECK-NEXT:    vl8re16.v v24, (a1)
-; CHECK-NEXT:    sltu a1, a2, a6
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    and a6, a1, a6
-; CHECK-NEXT:    sub a1, a6, a4
-; CHECK-NEXT:    sltu a7, a6, a1
-; CHECK-NEXT:    addi a7, a7, -1
-; CHECK-NEXT:    and a7, a7, a1
-; CHECK-NEXT:    srli a1, a3, 1
-; CHECK-NEXT:    srli a3, a3, 2
-; CHECK-NEXT:    csrr t0, vlenb
-; CHECK-NEXT:    slli t0, t0, 1
-; CHECK-NEXT:    mv t1, t0
-; CHECK-NEXT:    slli t0, t0, 2
-; CHECK-NEXT:    add t1, t1, t0
-; CHECK-NEXT:    slli t0, t0, 1
-; CHECK-NEXT:    add t0, t0, t1
-; CHECK-NEXT:    add t0, sp, t0
-; CHECK-NEXT:    addi t0, t0, 16
-; CHECK-NEXT:    vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT:    vslidedown.vx v16, v8, a1
-; CHECK-NEXT:    vl8re16.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    mv t0, a0
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    add a0, a0, t0
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v16, a3
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; CHECK-NEXT:    vsetvli zero, a7, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v28
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v4
-; CHECK-NEXT:    bltu a6, a4, .LBB85_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a6, a4
-; CHECK-NEXT:  .LBB85_2:
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vmfeq.vv v5, v8, v16, v0.t
-; CHECK-NEXT:    vsetvli zero, a6, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v24
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vmfeq.vv v7, v24, v16, v0.t
-; CHECK-NEXT:    bltu a2, a5, .LBB85_4
-; CHECK-NEXT:  # %bb.3:
-; CHECK-NEXT:    mv a2, a5
-; CHECK-NEXT:  .LBB85_4:
-; CHECK-NEXT:    sub a0, a2, a4
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    slli a5, a5, 1
-; CHECK-NEXT:    mv a6, a5
-; CHECK-NEXT:    slli a5, a5, 2
-; CHECK-NEXT:    add a6, a6, a5
-; CHECK-NEXT:    slli a5, a5, 1
-; CHECK-NEXT:    add a5, a5, a6
-; CHECK-NEXT:    add a5, sp, a5
-; CHECK-NEXT:    addi a5, a5, 16
-; CHECK-NEXT:    vl1r.v v8, (a5) # vscale x 8-byte Folded Reload
-; CHECK-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v8, a3
-; CHECK-NEXT:    sltu a5, a2, a0
-; CHECK-NEXT:    addi a5, a5, -1
-; CHECK-NEXT:    and a0, a5, a0
-; CHECK-NEXT:    csrr a5, vlenb
-; CHECK-NEXT:    slli a5, a5, 1
-; CHECK-NEXT:    mv a6, a5
-; CHECK-NEXT:    slli a5, a5, 3
-; CHECK-NEXT:    add a5, a5, a6
-; CHECK-NEXT:    add a5, sp, a5
-; CHECK-NEXT:    addi a5, a5, 16
-; CHECK-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v28
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    mv a5, a0
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    add a0, a0, a5
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vmfeq.vv v9, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v7
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v5, a3
-; CHECK-NEXT:    bltu a2, a4, .LBB85_6
-; CHECK-NEXT:  # %bb.5:
-; CHECK-NEXT:    mv a2, a4
-; CHECK-NEXT:  .LBB85_6:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    mv a4, a0
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add a0, a0, a4
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v24
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    mv a2, a0
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    add a0, a0, a2
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    mv a2, a0
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    add a2, a2, a0
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    add a0, a0, a2
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vmfeq.vv v10, v16, v24, v0.t
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v10, v9, a3
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v10, v8, a1
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    mv a1, a0
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    add a1, a1, a0
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    add a1, a1, a0
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    .cfi_def_cfa sp, 16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    ret
+; CHECK32-LABEL: fcmp_oeq_vv_nxv64bf16:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    addi sp, sp, -16
+; CHECK32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK32-NEXT:    csrr a1, vlenb
+; CHECK32-NEXT:    mv a3, a1
+; CHECK32-NEXT:    slli a1, a1, 1
+; CHECK32-NEXT:    add a3, a3, a1
+; CHECK32-NEXT:    slli a1, a1, 2
+; CHECK32-NEXT:    add a3, a3, a1
+; CHECK32-NEXT:    slli a1, a1, 1
+; CHECK32-NEXT:    add a1, a1, a3
+; CHECK32-NEXT:    sub sp, sp, a1
+; CHECK32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
+; CHECK32-NEXT:    csrr a1, vlenb
+; CHECK32-NEXT:    slli a1, a1, 1
+; CHECK32-NEXT:    mv a3, a1
+; CHECK32-NEXT:    slli a1, a1, 2
+; CHECK32-NEXT:    add a3, a3, a1
+; CHECK32-NEXT:    slli a1, a1, 1
+; CHECK32-NEXT:    add a1, a1, a3
+; CHECK32-NEXT:    add a1, sp, a1
+; CHECK32-NEXT:    addi a1, a1, 16
+; CHECK32-NEXT:    vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
+; CHECK32-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK32-NEXT:    vmv8r.v v0, v16
+; CHECK32-NEXT:    csrr a1, vlenb
+; CHECK32-NEXT:    slli a1, a1, 1
+; CHECK32-NEXT:    mv a3, a1
+; CHECK32-NEXT:    slli a1, a1, 3
+; CHECK32-NEXT:    add a1, a1, a3
+; CHECK32-NEXT:    add a1, sp, a1
+; CHECK32-NEXT:    addi a1, a1, 16
+; CHECK32-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK32-NEXT:    csrr a3, vlenb
+; CHECK32-NEXT:    slli a1, a3, 3
+; CHECK32-NEXT:    slli a5, a3, 2
+; CHECK32-NEXT:    slli a4, a3, 1
+; CHECK32-NEXT:    add a1, a0, a1
+; CHECK32-NEXT:    sub a6, a2, a5
+; CHECK32-NEXT:    vl8re16.v v24, (a1)
+; CHECK32-NEXT:    sltu a1, a2, a6
+; CHECK32-NEXT:    addi a1, a1, -1
+; CHECK32-NEXT:    and a6, a1, a6
+; CHECK32-NEXT:    sub a1, a6, a4
+; CHECK32-NEXT:    sltu a7, a6, a1
+; CHECK32-NEXT:    addi a7, a7, -1
+; CHECK32-NEXT:    and a7, a7, a1
+; CHECK32-NEXT:    srli a1, a3, 1
+; CHECK32-NEXT:    srli a3, a3, 2
+; CHECK32-NEXT:    csrr t0, vlenb
+; CHECK32-NEXT:    slli t0, t0, 1
+; CHECK32-NEXT:    mv t1, t0
+; CHECK32-NEXT:    slli t0, t0, 2
+; CHECK32-NEXT:    add t1, t1, t0
+; CHECK32-NEXT:    slli t0, t0, 1
+; CHECK32-NEXT:    add t0, t0, t1
+; CHECK32-NEXT:    add t0, sp, t0
+; CHECK32-NEXT:    addi t0, t0, 16
+; CHECK32-NEXT:    vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT:    vslidedown.vx v16, v8, a1
+; CHECK32-NEXT:    vl8re16.v v8, (a0)
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    mv t0, a0
+; CHECK32-NEXT:    slli a0, a0, 2
+; CHECK32-NEXT:    add a0, a0, t0
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
+; CHECK32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK32-NEXT:    vslidedown.vx v8, v16, a3
+; CHECK32-NEXT:    addi a0, sp, 16
+; CHECK32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK32-NEXT:    vsetvli zero, a7, e16, m4, ta, ma
+; CHECK32-NEXT:    vfwcvtbf16.f.f.v v16, v28
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; CHECK32-NEXT:    vfwcvtbf16.f.f.v v8, v4
+; CHECK32-NEXT:    bltu a6, a4, .LBB85_2
+; CHECK32-NEXT:  # %bb.1:
+; CHECK32-NEXT:    mv a6, a4
+; CHECK32-NEXT:  .LBB85_2:
+; CHECK32-NEXT:    addi a0, sp, 16
+; CHECK32-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK32-NEXT:    vmfeq.vv v5, v8, v16, v0.t
+; CHECK32-NEXT:    vsetvli zero, a6, e16, m4, ta, ma
+; CHECK32-NEXT:    vfwcvtbf16.f.f.v v16, v24
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT:    vfwcvtbf16.f.f.v v24, v8
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK32-NEXT:    vmfeq.vv v7, v24, v16, v0.t
+; CHECK32-NEXT:    bltu a2, a5, .LBB85_4
+; CHECK32-NEXT:  # %bb.3:
+; CHECK32-NEXT:    mv a2, a5
+; CHECK32-NEXT:  .LBB85_4:
+; CHECK32-NEXT:    sub a0, a2, a4
+; CHECK32-NEXT:    csrr a5, vlenb
+; CHECK32-NEXT:    slli a5, a5, 1
+; CHECK32-NEXT:    mv a6, a5
+; CHECK32-NEXT:    slli a5, a5, 2
+; CHECK32-NEXT:    add a6, a6, a5
+; CHECK32-NEXT:    slli a5, a5, 1
+; CHECK32-NEXT:    add a5, a5, a6
+; CHECK32-NEXT:    add a5, sp, a5
+; CHECK32-NEXT:    addi a5, a5, 16
+; CHECK32-NEXT:    vl1r.v v8, (a5) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; CHECK32-NEXT:    vslidedown.vx v0, v8, a3
+; CHECK32-NEXT:    sltu a5, a2, a0
+; CHECK32-NEXT:    addi a5, a5, -1
+; CHECK32-NEXT:    and a0, a5, a0
+; CHECK32-NEXT:    csrr a5, vlenb
+; CHECK32-NEXT:    slli a5, a5, 1
+; CHECK32-NEXT:    mv a6, a5
+; CHECK32-NEXT:    slli a5, a5, 3
+; CHECK32-NEXT:    add a5, a5, a6
+; CHECK32-NEXT:    add a5, sp, a5
+; CHECK32-NEXT:    addi a5, a5, 16
+; CHECK32-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK32-NEXT:    vfwcvtbf16.f.f.v v16, v28
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    mv a5, a0
+; CHECK32-NEXT:    slli a0, a0, 2
+; CHECK32-NEXT:    add a0, a0, a5
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK32-NEXT:    vmfeq.vv v9, v16, v24, v0.t
+; CHECK32-NEXT:    vmv1r.v v8, v7
+; CHECK32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK32-NEXT:    vslideup.vx v8, v5, a3
+; CHECK32-NEXT:    bltu a2, a4, .LBB85_6
+; CHECK32-NEXT:  # %bb.5:
+; CHECK32-NEXT:    mv a2, a4
+; CHECK32-NEXT:  .LBB85_6:
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    mv a4, a0
+; CHECK32-NEXT:    slli a0, a0, 3
+; CHECK32-NEXT:    add a0, a0, a4
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK32-NEXT:    vfwcvtbf16.f.f.v v16, v24
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    mv a2, a0
+; CHECK32-NEXT:    slli a0, a0, 2
+; CHECK32-NEXT:    add a0, a0, a2
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT:    vfwcvtbf16.f.f.v v24, v0
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    mv a2, a0
+; CHECK32-NEXT:    slli a0, a0, 2
+; CHECK32-NEXT:    add a2, a2, a0
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    add a0, a0, a2
+; CHECK32-NEXT:    add a0, sp, a0
+; CHECK32-NEXT:    addi a0, a0, 16
+; CHECK32-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK32-NEXT:    vmfeq.vv v10, v16, v24, v0.t
+; CHECK32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK32-NEXT:    vslideup.vx v10, v9, a3
+; CHECK32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK32-NEXT:    vslideup.vx v10, v8, a1
+; CHECK32-NEXT:    vmv.v.v v0, v10
+; CHECK32-NEXT:    csrr a0, vlenb
+; CHECK32-NEXT:    mv a1, a0
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    add a1, a1, a0
+; CHECK32-NEXT:    slli a0, a0, 2
+; CHECK32-NEXT:    add a1, a1, a0
+; CHECK32-NEXT:    slli a0, a0, 1
+; CHECK32-NEXT:    add a0, a0, a1
+; CHECK32-NEXT:    add sp, sp, a0
+; CHECK32-NEXT:    .cfi_def_cfa sp, 16
+; CHECK32-NEXT:    addi sp, sp, 16
+; CHECK32-NEXT:    .cfi_def_cfa_offset 0
+; CHECK32-NEXT:    ret
+;
+; CHECK64-LABEL: fcmp_oeq_vv_nxv64bf16:
+; CHECK64:       # %bb.0:
+; CHECK64-NEXT:    addi sp, sp, -16
+; CHECK64-NEXT:    .cfi_def_cfa_offset 16
+; CHECK64-NEXT:    csrr a1, vlenb
+; CHECK64-NEXT:    mv a3, a1
+; CHECK64-NEXT:    slli a1, a1, 1
+; CHECK64-NEXT:    add a3, a3, a1
+; CHECK64-NEXT:    slli a1, a1, 2
+; CHECK64-NEXT:    add a3, a3, a1
+; CHECK64-NEXT:    slli a1, a1, 1
+; CHECK64-NEXT:    add a1, a1, a3
+; CHECK64-NEXT:    sub sp, sp, a1
+; CHECK64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
+; CHECK64-NEXT:    csrr a1, vlenb
+; CHECK64-NEXT:    slli a1, a1, 1
+; CHECK64-NEXT:    mv a3, a1
+; CHECK64-NEXT:    slli a1, a1, 3
+; CHECK64-NEXT:    add a1, a1, a3
+; CHECK64-NEXT:    add a1, sp, a1
+; CHECK64-NEXT:    addi a1, a1, 16
+; CHECK64-NEXT:    vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
+; CHECK64-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK64-NEXT:    vmv8r.v v0, v16
+; CHECK64-NEXT:    csrr a1, vlenb
+; CHECK64-NEXT:    mv a3, a1
+; CHECK64-NEXT:    slli a1, a1, 1
+; CHECK64-NEXT:    add a3, a3, a1
+; CHECK64-NEXT:    slli a1, a1, 3
+; CHECK64-NEXT:    add a1, a1, a3
+; CHECK64-NEXT:    add a1, sp, a1
+; CHECK64-NEXT:    addi a1, a1, 16
+; CHECK64-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK64-NEXT:    csrr a3, vlenb
+; CHECK64-NEXT:    slli a1, a3, 3
+; CHECK64-NEXT:    slli a5, a3, 2
+; CHECK64-NEXT:    slli a4, a3, 1
+; CHECK64-NEXT:    add a1, a0, a1
+; CHECK64-NEXT:    sub a6, a2, a5
+; CHECK64-NEXT:    vl8re16.v v24, (a1)
+; CHECK64-NEXT:    sltu a1, a2, a6
+; CHECK64-NEXT:    addi a1, a1, -1
+; CHECK64-NEXT:    and a6, a1, a6
+; CHECK64-NEXT:    sub a1, a6, a4
+; CHECK64-NEXT:    sltu a7, a6, a1
+; CHECK64-NEXT:    addi a7, a7, -1
+; CHECK64-NEXT:    and a7, a7, a1
+; CHECK64-NEXT:    srli a1, a3, 1
+; CHECK64-NEXT:    srli a3, a3, 2
+; CHECK64-NEXT:    csrr t0, vlenb
+; CHECK64-NEXT:    slli t0, t0, 1
+; CHECK64-NEXT:    mv t1, t0
+; CHECK64-NEXT:    slli t0, t0, 3
+; CHECK64-NEXT:    add t0, t0, t1
+; CHECK64-NEXT:    add t0, sp, t0
+; CHECK64-NEXT:    addi t0, t0, 16
+; CHECK64-NEXT:    vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
+; CHECK64-NEXT:    vslidedown.vx v16, v8, a1
+; CHECK64-NEXT:    vl8re16.v v8, (a0)
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    slli a0, a0, 1
+; CHECK64-NEXT:    mv t0, a0
+; CHECK64-NEXT:    slli a0, a0, 2
+; CHECK64-NEXT:    add a0, a0, t0
+; CHECK64-NEXT:    add a0, sp, a0
+; CHECK64-NEXT:    addi a0, a0, 16
+; CHECK64-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    add a0, sp, a0
+; CHECK64-NEXT:    addi a0, a0, 16
+; CHECK64-NEXT:    vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
+; CHECK64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK64-NEXT:    vslidedown.vx v8, v16, a3
+; CHECK64-NEXT:    addi a0, sp, 16
+; CHECK64-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK64-NEXT:    vsetvli zero, a7, e16, m4, ta, ma
+; CHECK64-NEXT:    vfwcvtbf16.f.f.v v16, v28
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    slli a0, a0, 1
+; CHECK64-NEXT:    add a0, sp, a0
+; CHECK64-NEXT:    addi a0, a0, 16
+; CHECK64-NEXT:    vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; CHECK64-NEXT:    vfwcvtbf16.f.f.v v8, v4
+; CHECK64-NEXT:    bltu a6, a4, .LBB85_2
+; CHECK64-NEXT:  # %bb.1:
+; CHECK64-NEXT:    mv a6, a4
+; CHECK64-NEXT:  .LBB85_2:
+; CHECK64-NEXT:    addi a0, sp, 16
+; CHECK64-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK64-NEXT:    vmfeq.vv v5, v8, v16, v0.t
+; CHECK64-NEXT:    vsetvli zero, a6, e16, m4, ta, ma
+; CHECK64-NEXT:    vfwcvtbf16.f.f.v v16, v24
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    slli a0, a0, 1
+; CHECK64-NEXT:    add a0, sp, a0
+; CHECK64-NEXT:    addi a0, a0, 16
+; CHECK64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT:    vfwcvtbf16.f.f.v v24, v8
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    add a0, sp, a0
+; CHECK64-NEXT:    addi a0, a0, 16
+; CHECK64-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK64-NEXT:    vmfeq.vv v6, v24, v16, v0.t
+; CHECK64-NEXT:    bltu a2, a5, .LBB85_4
+; CHECK64-NEXT:  # %bb.3:
+; CHECK64-NEXT:    mv a2, a5
+; CHECK64-NEXT:  .LBB85_4:
+; CHECK64-NEXT:    sub a0, a2, a4
+; CHECK64-NEXT:    csrr a5, vlenb
+; CHECK64-NEXT:    slli a5, a5, 1
+; CHECK64-NEXT:    mv a6, a5
+; CHECK64-NEXT:    slli a5, a5, 3
+; CHECK64-NEXT:    add a5, a5, a6
+; CHECK64-NEXT:    add a5, sp, a5
+; CHECK64-NEXT:    addi a5, a5, 16
+; CHECK64-NEXT:    vl1r.v v7, (a5) # vscale x 8-byte Folded Reload
+; CHECK64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; CHECK64-NEXT:    vslidedown.vx v0, v7, a3
+; CHECK64-NEXT:    sltu a5, a2, a0
+; CHECK64-NEXT:    addi a5, a5, -1
+; CHECK64-NEXT:    and a0, a5, a0
+; CHECK64-NEXT:    csrr a5, vlenb
+; CHECK64-NEXT:    mv a6, a5
+; CHECK64-NEXT:    slli a5, a5, 1
+; CHECK64-NEXT:    add a6, a6, a5
+; CHECK64-NEXT:    slli a5, a5, 3
+; CHECK64-NEXT:    add a5, a5, a6
+; CHECK64-NEXT:    add a5, sp, a5
+; CHECK64-NEXT:    addi a5, a5, 16
+; CHECK64-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK64-NEXT:    vfwcvtbf16.f.f.v v16, v28
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    slli a0, a0, 1
+; CHECK64-NEXT:    mv a5, a0
+; CHECK64-NEXT:    slli a0, a0, 2
+; CHECK64-NEXT:    add a0, a0, a5
+; CHECK64-NEXT:    add a0, sp, a0
+; CHECK64-NEXT:    addi a0, a0, 16
+; CHECK64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK64-NEXT:    vmfeq.vv v4, v16, v24, v0.t
+; CHECK64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK64-NEXT:    vslideup.vx v6, v5, a3
+; CHECK64-NEXT:    bltu a2, a4, .LBB85_6
+; CHECK64-NEXT:  # %bb.5:
+; CHECK64-NEXT:    mv a2, a4
+; CHECK64-NEXT:  .LBB85_6:
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    mv a4, a0
+; CHECK64-NEXT:    slli a0, a0, 1
+; CHECK64-NEXT:    add a4, a4, a0
+; CHECK64-NEXT:    slli a0, a0, 3
+; CHECK64-NEXT:    add a0, a0, a4
+; CHECK64-NEXT:    add a0, sp, a0
+; CHECK64-NEXT:    addi a0, a0, 16
+; CHECK64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK64-NEXT:    vfwcvtbf16.f.f.v v16, v24
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    slli a0, a0, 1
+; CHECK64-NEXT:    mv a2, a0
+; CHECK64-NEXT:    slli a0, a0, 2
+; CHECK64-NEXT:    add a0, a0, a2
+; CHECK64-NEXT:    add a0, sp, a0
+; CHECK64-NEXT:    addi a0, a0, 16
+; CHECK64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT:    vfwcvtbf16.f.f.v v24, v8
+; CHECK64-NEXT:    vmv1r.v v0, v7
+; CHECK64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK64-NEXT:    vmfeq.vv v10, v16, v24, v0.t
+; CHECK64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK64-NEXT:    vslideup.vx v10, v4, a3
+; CHECK64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK64-NEXT:    vslideup.vx v10, v6, a1
+; CHECK64-NEXT:    vmv.v.v v0, v10
+; CHECK64-NEXT:    csrr a0, vlenb
+; CHECK64-NEXT:    mv a1, a0
+; CHECK64-NEXT:    slli a0, a0, 1
+; CHECK64-NEXT:    add a1, a1, a0
+; CHECK64-NEXT:    slli a0, a0, 2
+; CHECK64-NEXT:    add a1, a1, a0
+; CHECK64-NEXT:    slli a0, a0, 1
+; CHECK64-NEXT:    add a0, a0, a1
+; CHECK64-NEXT:    add sp, sp, a0
+; CHECK64-NEXT:    .cfi_def_cfa sp, 16
+; CHECK64-NEXT:    addi sp, sp, 16
+; CHECK64-NEXT:    .cfi_def_cfa_offset 0
+; CHECK64-NEXT:    ret
   %v = call <vscale x 64 x i1> @llvm.vp.fcmp.nxv64bf16(<vscale x 64 x bfloat> %va, <vscale x 64 x bfloat> %vb, metadata !"oeq", <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i1> %v
 }
@@ -3527,211 +3720,404 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
 ; ZVFH-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: fcmp_oeq_vv_nxv64f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    mv a3, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a3, a3, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 2
-; ZVFHMIN-NEXT:    add a3, a3, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a1, a1, a3
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    mv a3, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 2
-; ZVFHMIN-NEXT:    add a3, a3, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a1, a1, a3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv8r.v v0, v16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    mv a3, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, a1, a3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    slli a1, a3, 3
-; ZVFHMIN-NEXT:    slli a5, a3, 2
-; ZVFHMIN-NEXT:    slli a4, a3, 1
-; ZVFHMIN-NEXT:    add a1, a0, a1
-; ZVFHMIN-NEXT:    sub a6, a2, a5
-; ZVFHMIN-NEXT:    vl8re16.v v24, (a1)
-; ZVFHMIN-NEXT:    sltu a1, a2, a6
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    and a6, a1, a6
-; ZVFHMIN-NEXT:    sub a1, a6, a4
-; ZVFHMIN-NEXT:    sltu a7, a6, a1
-; ZVFHMIN-NEXT:    addi a7, a7, -1
-; ZVFHMIN-NEXT:    and a7, a7, a1
-; ZVFHMIN-NEXT:    srli a1, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    csrr t0, vlenb
-; ZVFHMIN-NEXT:    slli t0, t0, 1
-; ZVFHMIN-NEXT:    mv t1, t0
-; ZVFHMIN-NEXT:    slli t0, t0, 2
-; ZVFHMIN-NEXT:    add t1, t1, t0
-; ZVFHMIN-NEXT:    slli t0, t0, 1
-; ZVFHMIN-NEXT:    add t0, t0, t1
-; ZVFHMIN-NEXT:    add t0, sp, t0
-; ZVFHMIN-NEXT:    addi t0, t0, 16
-; ZVFHMIN-NEXT:    vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT:    vslidedown.vx v16, v8, a1
-; ZVFHMIN-NEXT:    vl8re16.v v8, (a0)
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    mv t0, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 2
-; ZVFHMIN-NEXT:    add a0, a0, t0
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v8, v16, a3
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a7, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v4
-; ZVFHMIN-NEXT:    bltu a6, a4, .LBB171_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a6, a4
-; ZVFHMIN-NEXT:  .LBB171_2:
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vmfeq.vv v5, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, a6, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vmfeq.vv v7, v24, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a2, a5, .LBB171_4
-; ZVFHMIN-NEXT:  # %bb.3:
-; ZVFHMIN-NEXT:    mv a2, a5
-; ZVFHMIN-NEXT:  .LBB171_4:
-; ZVFHMIN-NEXT:    sub a0, a2, a4
-; ZVFHMIN-NEXT:    csrr a5, vlenb
-; ZVFHMIN-NEXT:    slli a5, a5, 1
-; ZVFHMIN-NEXT:    mv a6, a5
-; ZVFHMIN-NEXT:    slli a5, a5, 2
-; ZVFHMIN-NEXT:    add a6, a6, a5
-; ZVFHMIN-NEXT:    slli a5, a5, 1
-; ZVFHMIN-NEXT:    add a5, a5, a6
-; ZVFHMIN-NEXT:    add a5, sp, a5
-; ZVFHMIN-NEXT:    addi a5, a5, 16
-; ZVFHMIN-NEXT:    vl1r.v v8, (a5) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT:    sltu a5, a2, a0
-; ZVFHMIN-NEXT:    addi a5, a5, -1
-; ZVFHMIN-NEXT:    and a0, a5, a0
-; ZVFHMIN-NEXT:    csrr a5, vlenb
-; ZVFHMIN-NEXT:    slli a5, a5, 1
-; ZVFHMIN-NEXT:    mv a6, a5
-; ZVFHMIN-NEXT:    slli a5, a5, 3
-; ZVFHMIN-NEXT:    add a5, a5, a6
-; ZVFHMIN-NEXT:    add a5, sp, a5
-; ZVFHMIN-NEXT:    addi a5, a5, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    mv a5, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 2
-; ZVFHMIN-NEXT:    add a0, a0, a5
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vmfeq.vv v9, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v7
-; ZVFHMIN-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslideup.vx v8, v5, a3
-; ZVFHMIN-NEXT:    bltu a2, a4, .LBB171_6
-; ZVFHMIN-NEXT:  # %bb.5:
-; ZVFHMIN-NEXT:    mv a2, a4
-; ZVFHMIN-NEXT:  .LBB171_6:
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    mv a4, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, a0, a4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    mv a2, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 2
-; ZVFHMIN-NEXT:    add a0, a0, a2
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    mv a2, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 2
-; ZVFHMIN-NEXT:    add a2, a2, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a2
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vmfeq.vv v10, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslideup.vx v10, v9, a3
-; ZVFHMIN-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; ZVFHMIN-NEXT:    vslideup.vx v10, v8, a1
-; ZVFHMIN-NEXT:    vmv.v.v v0, v10
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a1, a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 2
-; ZVFHMIN-NEXT:    add a1, a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
+; ZVFHMIN32-LABEL: fcmp_oeq_vv_nxv64f16:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a1, vlenb
+; ZVFHMIN32-NEXT:    mv a3, a1
+; ZVFHMIN32-NEXT:    slli a1, a1, 1
+; ZVFHMIN32-NEXT:    add a3, a3, a1
+; ZVFHMIN32-NEXT:    slli a1, a1, 2
+; ZVFHMIN32-NEXT:    add a3, a3, a1
+; ZVFHMIN32-NEXT:    slli a1, a1, 1
+; ZVFHMIN32-NEXT:    add a1, a1, a3
+; ZVFHMIN32-NEXT:    sub sp, sp, a1
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
+; ZVFHMIN32-NEXT:    csrr a1, vlenb
+; ZVFHMIN32-NEXT:    slli a1, a1, 1
+; ZVFHMIN32-NEXT:    mv a3, a1
+; ZVFHMIN32-NEXT:    slli a1, a1, 2
+; ZVFHMIN32-NEXT:    add a3, a3, a1
+; ZVFHMIN32-NEXT:    slli a1, a1, 1
+; ZVFHMIN32-NEXT:    add a1, a1, a3
+; ZVFHMIN32-NEXT:    add a1, sp, a1
+; ZVFHMIN32-NEXT:    addi a1, a1, 16
+; ZVFHMIN32-NEXT:    vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; ZVFHMIN32-NEXT:    vmv8r.v v0, v16
+; ZVFHMIN32-NEXT:    csrr a1, vlenb
+; ZVFHMIN32-NEXT:    slli a1, a1, 1
+; ZVFHMIN32-NEXT:    mv a3, a1
+; ZVFHMIN32-NEXT:    slli a1, a1, 3
+; ZVFHMIN32-NEXT:    add a1, a1, a3
+; ZVFHMIN32-NEXT:    add a1, sp, a1
+; ZVFHMIN32-NEXT:    addi a1, a1, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    slli a1, a3, 3
+; ZVFHMIN32-NEXT:    slli a5, a3, 2
+; ZVFHMIN32-NEXT:    slli a4, a3, 1
+; ZVFHMIN32-NEXT:    add a1, a0, a1
+; ZVFHMIN32-NEXT:    sub a6, a2, a5
+; ZVFHMIN32-NEXT:    vl8re16.v v24, (a1)
+; ZVFHMIN32-NEXT:    sltu a1, a2, a6
+; ZVFHMIN32-NEXT:    addi a1, a1, -1
+; ZVFHMIN32-NEXT:    and a6, a1, a6
+; ZVFHMIN32-NEXT:    sub a1, a6, a4
+; ZVFHMIN32-NEXT:    sltu a7, a6, a1
+; ZVFHMIN32-NEXT:    addi a7, a7, -1
+; ZVFHMIN32-NEXT:    and a7, a7, a1
+; ZVFHMIN32-NEXT:    srli a1, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    csrr t0, vlenb
+; ZVFHMIN32-NEXT:    slli t0, t0, 1
+; ZVFHMIN32-NEXT:    mv t1, t0
+; ZVFHMIN32-NEXT:    slli t0, t0, 2
+; ZVFHMIN32-NEXT:    add t1, t1, t0
+; ZVFHMIN32-NEXT:    slli t0, t0, 1
+; ZVFHMIN32-NEXT:    add t0, t0, t1
+; ZVFHMIN32-NEXT:    add t0, sp, t0
+; ZVFHMIN32-NEXT:    addi t0, t0, 16
+; ZVFHMIN32-NEXT:    vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT:    vslidedown.vx v16, v8, a1
+; ZVFHMIN32-NEXT:    vl8re16.v v8, (a0)
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    mv t0, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 2
+; ZVFHMIN32-NEXT:    add a0, a0, t0
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v8, v16, a3
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a7, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v28
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v4
+; ZVFHMIN32-NEXT:    bltu a6, a4, .LBB171_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a6, a4
+; ZVFHMIN32-NEXT:  .LBB171_2:
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vmfeq.vv v5, v8, v16, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, a6, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v8
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vmfeq.vv v7, v24, v16, v0.t
+; ZVFHMIN32-NEXT:    bltu a2, a5, .LBB171_4
+; ZVFHMIN32-NEXT:  # %bb.3:
+; ZVFHMIN32-NEXT:    mv a2, a5
+; ZVFHMIN32-NEXT:  .LBB171_4:
+; ZVFHMIN32-NEXT:    sub a0, a2, a4
+; ZVFHMIN32-NEXT:    csrr a5, vlenb
+; ZVFHMIN32-NEXT:    slli a5, a5, 1
+; ZVFHMIN32-NEXT:    mv a6, a5
+; ZVFHMIN32-NEXT:    slli a5, a5, 2
+; ZVFHMIN32-NEXT:    add a6, a6, a5
+; ZVFHMIN32-NEXT:    slli a5, a5, 1
+; ZVFHMIN32-NEXT:    add a5, a5, a6
+; ZVFHMIN32-NEXT:    add a5, sp, a5
+; ZVFHMIN32-NEXT:    addi a5, a5, 16
+; ZVFHMIN32-NEXT:    vl1r.v v8, (a5) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT:    sltu a5, a2, a0
+; ZVFHMIN32-NEXT:    addi a5, a5, -1
+; ZVFHMIN32-NEXT:    and a0, a5, a0
+; ZVFHMIN32-NEXT:    csrr a5, vlenb
+; ZVFHMIN32-NEXT:    slli a5, a5, 1
+; ZVFHMIN32-NEXT:    mv a6, a5
+; ZVFHMIN32-NEXT:    slli a5, a5, 3
+; ZVFHMIN32-NEXT:    add a5, a5, a6
+; ZVFHMIN32-NEXT:    add a5, sp, a5
+; ZVFHMIN32-NEXT:    addi a5, a5, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v28
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    mv a5, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 2
+; ZVFHMIN32-NEXT:    add a0, a0, a5
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vmfeq.vv v9, v16, v24, v0.t
+; ZVFHMIN32-NEXT:    vmv1r.v v8, v7
+; ZVFHMIN32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslideup.vx v8, v5, a3
+; ZVFHMIN32-NEXT:    bltu a2, a4, .LBB171_6
+; ZVFHMIN32-NEXT:  # %bb.5:
+; ZVFHMIN32-NEXT:    mv a2, a4
+; ZVFHMIN32-NEXT:  .LBB171_6:
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    mv a4, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, a0, a4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    mv a2, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 2
+; ZVFHMIN32-NEXT:    add a0, a0, a2
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v0
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    mv a2, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 2
+; ZVFHMIN32-NEXT:    add a2, a2, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a2
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vmfeq.vv v10, v16, v24, v0.t
+; ZVFHMIN32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslideup.vx v10, v9, a3
+; ZVFHMIN32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVFHMIN32-NEXT:    vslideup.vx v10, v8, a1
+; ZVFHMIN32-NEXT:    vmv.v.v v0, v10
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a1, a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 2
+; ZVFHMIN32-NEXT:    add a1, a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: fcmp_oeq_vv_nxv64f16:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a1, vlenb
+; ZVFHMIN64-NEXT:    mv a3, a1
+; ZVFHMIN64-NEXT:    slli a1, a1, 1
+; ZVFHMIN64-NEXT:    add a3, a3, a1
+; ZVFHMIN64-NEXT:    slli a1, a1, 2
+; ZVFHMIN64-NEXT:    add a3, a3, a1
+; ZVFHMIN64-NEXT:    slli a1, a1, 1
+; ZVFHMIN64-NEXT:    add a1, a1, a3
+; ZVFHMIN64-NEXT:    sub sp, sp, a1
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
+; ZVFHMIN64-NEXT:    csrr a1, vlenb
+; ZVFHMIN64-NEXT:    slli a1, a1, 1
+; ZVFHMIN64-NEXT:    mv a3, a1
+; ZVFHMIN64-NEXT:    slli a1, a1, 3
+; ZVFHMIN64-NEXT:    add a1, a1, a3
+; ZVFHMIN64-NEXT:    add a1, sp, a1
+; ZVFHMIN64-NEXT:    addi a1, a1, 16
+; ZVFHMIN64-NEXT:    vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; ZVFHMIN64-NEXT:    vmv8r.v v0, v16
+; ZVFHMIN64-NEXT:    csrr a1, vlenb
+; ZVFHMIN64-NEXT:    mv a3, a1
+; ZVFHMIN64-NEXT:    slli a1, a1, 1
+; ZVFHMIN64-NEXT:    add a3, a3, a1
+; ZVFHMIN64-NEXT:    slli a1, a1, 3
+; ZVFHMIN64-NEXT:    add a1, a1, a3
+; ZVFHMIN64-NEXT:    add a1, sp, a1
+; ZVFHMIN64-NEXT:    addi a1, a1, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    slli a1, a3, 3
+; ZVFHMIN64-NEXT:    slli a5, a3, 2
+; ZVFHMIN64-NEXT:    slli a4, a3, 1
+; ZVFHMIN64-NEXT:    add a1, a0, a1
+; ZVFHMIN64-NEXT:    sub a6, a2, a5
+; ZVFHMIN64-NEXT:    vl8re16.v v24, (a1)
+; ZVFHMIN64-NEXT:    sltu a1, a2, a6
+; ZVFHMIN64-NEXT:    addi a1, a1, -1
+; ZVFHMIN64-NEXT:    and a6, a1, a6
+; ZVFHMIN64-NEXT:    sub a1, a6, a4
+; ZVFHMIN64-NEXT:    sltu a7, a6, a1
+; ZVFHMIN64-NEXT:    addi a7, a7, -1
+; ZVFHMIN64-NEXT:    and a7, a7, a1
+; ZVFHMIN64-NEXT:    srli a1, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    csrr t0, vlenb
+; ZVFHMIN64-NEXT:    slli t0, t0, 1
+; ZVFHMIN64-NEXT:    mv t1, t0
+; ZVFHMIN64-NEXT:    slli t0, t0, 3
+; ZVFHMIN64-NEXT:    add t0, t0, t1
+; ZVFHMIN64-NEXT:    add t0, sp, t0
+; ZVFHMIN64-NEXT:    addi t0, t0, 16
+; ZVFHMIN64-NEXT:    vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
+; ZVFHMIN64-NEXT:    vslidedown.vx v16, v8, a1
+; ZVFHMIN64-NEXT:    vl8re16.v v8, (a0)
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    mv t0, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 2
+; ZVFHMIN64-NEXT:    add a0, a0, t0
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v8, v16, a3
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a7, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v28
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v4
+; ZVFHMIN64-NEXT:    bltu a6, a4, .LBB171_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a6, a4
+; ZVFHMIN64-NEXT:  .LBB171_2:
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vmfeq.vv v5, v8, v16, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, a6, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v8
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vmfeq.vv v6, v24, v16, v0.t
+; ZVFHMIN64-NEXT:    bltu a2, a5, .LBB171_4
+; ZVFHMIN64-NEXT:  # %bb.3:
+; ZVFHMIN64-NEXT:    mv a2, a5
+; ZVFHMIN64-NEXT:  .LBB171_4:
+; ZVFHMIN64-NEXT:    sub a0, a2, a4
+; ZVFHMIN64-NEXT:    csrr a5, vlenb
+; ZVFHMIN64-NEXT:    slli a5, a5, 1
+; ZVFHMIN64-NEXT:    mv a6, a5
+; ZVFHMIN64-NEXT:    slli a5, a5, 3
+; ZVFHMIN64-NEXT:    add a5, a5, a6
+; ZVFHMIN64-NEXT:    add a5, sp, a5
+; ZVFHMIN64-NEXT:    addi a5, a5, 16
+; ZVFHMIN64-NEXT:    vl1r.v v7, (a5) # vscale x 8-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v0, v7, a3
+; ZVFHMIN64-NEXT:    sltu a5, a2, a0
+; ZVFHMIN64-NEXT:    addi a5, a5, -1
+; ZVFHMIN64-NEXT:    and a0, a5, a0
+; ZVFHMIN64-NEXT:    csrr a5, vlenb
+; ZVFHMIN64-NEXT:    mv a6, a5
+; ZVFHMIN64-NEXT:    slli a5, a5, 1
+; ZVFHMIN64-NEXT:    add a6, a6, a5
+; ZVFHMIN64-NEXT:    slli a5, a5, 3
+; ZVFHMIN64-NEXT:    add a5, a5, a6
+; ZVFHMIN64-NEXT:    add a5, sp, a5
+; ZVFHMIN64-NEXT:    addi a5, a5, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v28
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    mv a5, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 2
+; ZVFHMIN64-NEXT:    add a0, a0, a5
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vmfeq.vv v4, v16, v24, v0.t
+; ZVFHMIN64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslideup.vx v6, v5, a3
+; ZVFHMIN64-NEXT:    bltu a2, a4, .LBB171_6
+; ZVFHMIN64-NEXT:  # %bb.5:
+; ZVFHMIN64-NEXT:    mv a2, a4
+; ZVFHMIN64-NEXT:  .LBB171_6:
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    mv a4, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a4, a4, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, a0, a4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    mv a2, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 2
+; ZVFHMIN64-NEXT:    add a0, a0, a2
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v8
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v7
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vmfeq.vv v10, v16, v24, v0.t
+; ZVFHMIN64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslideup.vx v10, v4, a3
+; ZVFHMIN64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVFHMIN64-NEXT:    vslideup.vx v10, v6, a1
+; ZVFHMIN64-NEXT:    vmv.v.v v0, v10
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a1, a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 2
+; ZVFHMIN64-NEXT:    add a1, a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
   %v = call <vscale x 64 x i1> @llvm.vp.fcmp.nxv64f16(<vscale x 64 x half> %va, <vscale x 64 x half> %vb, metadata !"oeq", <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i1> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index e669518fe167d..a638e70db7b42 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -7,10 +7,10 @@
 ; RUN:     --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
 ; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN:     --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN32
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
 ; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN:     --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64
 
 define <vscale x 1 x bfloat> @vfma_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x bfloat> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfma_vv_nxv1bf16:
@@ -8706,1033 +8706,969 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFH-NEXT:    vmv.v.v v8, v16
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a2
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v3, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vxor.vx v8, v16, a2, v0.t
-; ZVFHMIN-NEXT:    slli a0, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a1, a0
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; ZVFHMIN-NEXT:    sltu a3, a1, a4
-; ZVFHMIN-NEXT:    csrr a5, vlenb
-; ZVFHMIN-NEXT:    slli a5, a5, 4
-; ZVFHMIN-NEXT:    add a5, sp, a5
-; ZVFHMIN-NEXT:    addi a5, a5, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v24, v24, a2, v0.t
-; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v28, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a1, a0, .LBB286_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:  .LBB286_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v3
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv4r.v v12, v4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
-  ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vv_nxv32f16_commuted:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT:    vle16.v v24, (a0)
-; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24, v0.t
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_commuted:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a2
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v3, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
-; ZVFHMIN-NEXT:    slli a0, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a1, a0
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; ZVFHMIN-NEXT:    sltu a3, a1, a4
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v8, v8, a2, v0.t
-; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a1, a0, .LBB287_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:  .LBB287_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v3
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v16, v8
-; ZVFHMIN-NEXT:    vmv4r.v v12, v4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
-  ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT:    vle16.v v24, (a0)
-; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a2
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v24, (a0)
-; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v8
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2
-; ZVFHMIN-NEXT:    slli a0, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a1, a0
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT:    sltu a3, a1, a4
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2
-; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a1, a0, .LBB288_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:  .LBB288_2:
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT:    vle16.v v24, (a0)
-; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a2
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v24, (a0)
-; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v8
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2
-; ZVFHMIN-NEXT:    slli a0, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a1, a0
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT:    sltu a3, a1, a4
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2
-; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a1, a0, .LBB289_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:  .LBB289_2:
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v0, v16, v24
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16, v0.t
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v3, v0
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vmv.v.x v24, a1
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    mv a4, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a1, a1, a4
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v8, v8, a2, v0.t
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
-; ZVFHMIN-NEXT:    slli a1, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a2, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a3
-; ZVFHMIN-NEXT:    sltu a3, a0, a2
-; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a2, a3, a2
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    slli a3, a3, 3
-; ZVFHMIN-NEXT:    add a3, sp, a3
-; ZVFHMIN-NEXT:    addi a3, a3, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB290_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB290_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v3
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v24, v16, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v16, v8
-; ZVFHMIN-NEXT:    vmv4r.v v12, v4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
-  %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
-  ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_commute:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16, v0.t
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_commute:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v3, v0
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vmv.v.x v24, a1
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    mv a4, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a1, a1, a4
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v8, v8, a2, v0.t
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
-; ZVFHMIN-NEXT:    slli a1, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a2, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a3
-; ZVFHMIN-NEXT:    sltu a3, a0, a2
-; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a2, a3, a2
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    slli a3, a3, 3
-; ZVFHMIN-NEXT:    add a3, sp, a3
-; ZVFHMIN-NEXT:    addi a3, a3, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v28, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB291_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB291_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v3
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v24, v16, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v16, v8
-; ZVFHMIN-NEXT:    vmv4r.v v12, v4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
-  %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
-  ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    mv a2, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 2
-; ZVFHMIN-NEXT:    add a1, a1, a2
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetvli a3, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v7
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v24, v8, a1
-; ZVFHMIN-NEXT:    vxor.vx v8, v16, a1
-; ZVFHMIN-NEXT:    slli a1, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v7, a3
-; ZVFHMIN-NEXT:    sltu a3, a0, a4
-; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
-; ZVFHMIN-NEXT:    csrr a4, vlenb
-; ZVFHMIN-NEXT:    slli a4, a4, 3
-; ZVFHMIN-NEXT:    mv a5, a4
-; ZVFHMIN-NEXT:    slli a4, a4, 1
-; ZVFHMIN-NEXT:    add a4, a4, a5
-; ZVFHMIN-NEXT:    add a4, sp, a4
-; ZVFHMIN-NEXT:    addi a4, a4, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT:    csrr a4, vlenb
-; ZVFHMIN-NEXT:    slli a4, a4, 4
-; ZVFHMIN-NEXT:    add a4, sp, a4
-; ZVFHMIN-NEXT:    addi a4, a4, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a4, vlenb
-; ZVFHMIN-NEXT:    slli a4, a4, 5
-; ZVFHMIN-NEXT:    add a4, sp, a4
-; ZVFHMIN-NEXT:    addi a4, a4, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v28, v0.t
-; ZVFHMIN-NEXT:    csrr a4, vlenb
-; ZVFHMIN-NEXT:    slli a4, a4, 3
-; ZVFHMIN-NEXT:    add a4, sp, a4
-; ZVFHMIN-NEXT:    addi a4, a4, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v8, a2
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB292_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB292_2:
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    mv a2, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a1, a1, a2
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v24
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v0, v16
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 2
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
-  %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetvli a3, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a1
-; ZVFHMIN-NEXT:    slli a1, a3, 1
-; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a3
-; ZVFHMIN-NEXT:    sltu a3, a0, a4
-; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
-; ZVFHMIN-NEXT:    csrr a4, vlenb
-; ZVFHMIN-NEXT:    slli a4, a4, 4
-; ZVFHMIN-NEXT:    add a4, sp, a4
-; ZVFHMIN-NEXT:    addi a4, a4, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT:    csrr a4, vlenb
-; ZVFHMIN-NEXT:    slli a4, a4, 3
-; ZVFHMIN-NEXT:    add a4, sp, a4
-; ZVFHMIN-NEXT:    addi a4, a4, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a4, vlenb
-; ZVFHMIN-NEXT:    slli a4, a4, 3
-; ZVFHMIN-NEXT:    mv a5, a4
-; ZVFHMIN-NEXT:    slli a4, a4, 1
-; ZVFHMIN-NEXT:    add a4, a4, a5
-; ZVFHMIN-NEXT:    add a4, sp, a4
-; ZVFHMIN-NEXT:    addi a4, a4, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v8, a2
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB293_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB293_2:
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v0, v24
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
-  %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+; ZVFHMIN32-LABEL: vfnmadd_vv_nxv32f16:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 5
+; ZVFHMIN32-NEXT:    sub sp, sp, a2
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    lui a2, 8
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    vxor.vx v8, v16, a2, v0.t
+; ZVFHMIN32-NEXT:    slli a0, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    sub a4, a1, a0
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v6, v0, a3
+; ZVFHMIN32-NEXT:    sltu a3, a1, a4
+; ZVFHMIN32-NEXT:    csrr a5, vlenb
+; ZVFHMIN32-NEXT:    slli a5, a5, 4
+; ZVFHMIN32-NEXT:    add a5, sp, a5
+; ZVFHMIN32-NEXT:    addi a5, a5, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v24, v24, a2, v0.t
+; ZVFHMIN32-NEXT:    addi a3, a3, -1
+; ZVFHMIN32-NEXT:    and a3, a3, a4
+; ZVFHMIN32-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v28, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 4
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v4, v24, v0.t
+; ZVFHMIN32-NEXT:    bltu a1, a0, .LBB286_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:  .LBB286_2:
+; ZVFHMIN32-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT:    vmv4r.v v12, v4
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 5
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: vfnmadd_vv_nxv32f16:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 5
+; ZVFHMIN64-NEXT:    sub sp, sp, a2
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN64-NEXT:    lui a2, 8
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN64-NEXT:    slli a0, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    sub a4, a1, a0
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v6, v0, a3
+; ZVFHMIN64-NEXT:    sltu a3, a1, a4
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN64-NEXT:    addi a3, a3, -1
+; ZVFHMIN64-NEXT:    and a3, a3, a4
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 4
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v4, v24, v0.t
+; ZVFHMIN64-NEXT:    bltu a1, a0, .LBB286_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:  .LBB286_2:
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT:    vmv.v.v v16, v8
+; ZVFHMIN64-NEXT:    vmv4r.v v12, v4
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 5
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat:
+define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vv_nxv32f16_commuted:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT:    vle16.v v24, (a0)
+; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN32-LABEL: vfnmadd_vv_nxv32f16_commuted:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 5
+; ZVFHMIN32-NEXT:    sub sp, sp, a2
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN32-NEXT:    lui a2, 8
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN32-NEXT:    slli a0, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    sub a4, a1, a0
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v6, v0, a3
+; ZVFHMIN32-NEXT:    sltu a3, a1, a4
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN32-NEXT:    addi a3, a3, -1
+; ZVFHMIN32-NEXT:    and a3, a3, a4
+; ZVFHMIN32-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 4
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v4, v16, v0.t
+; ZVFHMIN32-NEXT:    bltu a1, a0, .LBB287_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:  .LBB287_2:
+; ZVFHMIN32-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN32-NEXT:    vmv.v.v v16, v8
+; ZVFHMIN32-NEXT:    vmv4r.v v12, v4
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 5
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: vfnmadd_vv_nxv32f16_commuted:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 5
+; ZVFHMIN64-NEXT:    sub sp, sp, a2
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN64-NEXT:    lui a2, 8
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN64-NEXT:    slli a0, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    sub a4, a1, a0
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v6, v0, a3
+; ZVFHMIN64-NEXT:    sltu a3, a1, a4
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN64-NEXT:    addi a3, a3, -1
+; ZVFHMIN64-NEXT:    and a3, a3, a4
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 4
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v4, v8, v0.t
+; ZVFHMIN64-NEXT:    bltu a1, a0, .LBB287_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:  .LBB287_2:
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT:    vmv.v.v v16, v8
+; ZVFHMIN64-NEXT:    vmv4r.v v12, v4
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 5
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT:    vle16.v v24, (a0)
+; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN32-LABEL: vfnmadd_vv_nxv32f16_unmasked:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 5
+; ZVFHMIN32-NEXT:    sub sp, sp, a2
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN32-NEXT:    lui a2, 8
+; ZVFHMIN32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN32-NEXT:    vmset.m v8
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v16, v16, a2
+; ZVFHMIN32-NEXT:    slli a0, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    sub a4, a1, a0
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT:    sltu a3, a1, a4
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v8, v24, a2
+; ZVFHMIN32-NEXT:    addi a3, a3, -1
+; ZVFHMIN32-NEXT:    and a3, a3, a4
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 4
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN32-NEXT:    bltu a1, a0, .LBB288_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:  .LBB288_2:
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v0, v16
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v16, v24, v0
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 5
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: vfnmadd_vv_nxv32f16_unmasked:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 5
+; ZVFHMIN64-NEXT:    sub sp, sp, a2
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN64-NEXT:    lui a2, 8
+; ZVFHMIN64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN64-NEXT:    vmset.m v8
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v16, v16, a2
+; ZVFHMIN64-NEXT:    slli a0, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    sub a4, a1, a0
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v0, v8, a3
+; ZVFHMIN64-NEXT:    sltu a3, a1, a4
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v8, v24, a2
+; ZVFHMIN64-NEXT:    addi a3, a3, -1
+; ZVFHMIN64-NEXT:    and a3, a3, a4
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 4
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN64-NEXT:    bltu a1, a0, .LBB288_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:  .LBB288_2:
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v0, v24
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v0, v16, v24
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 5
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT:    vle16.v v24, (a0)
+; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN32-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 5
+; ZVFHMIN32-NEXT:    sub sp, sp, a2
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN32-NEXT:    lui a2, 8
+; ZVFHMIN32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN32-NEXT:    vmset.m v8
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v16, v16, a2
+; ZVFHMIN32-NEXT:    slli a0, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    sub a4, a1, a0
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT:    sltu a3, a1, a4
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v8, v24, a2
+; ZVFHMIN32-NEXT:    addi a3, a3, -1
+; ZVFHMIN32-NEXT:    and a3, a3, a4
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 4
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v12, v16, v0.t
+; ZVFHMIN32-NEXT:    bltu a1, a0, .LBB289_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:  .LBB289_2:
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v0, v16
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v0, v16, v24
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 5
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 5
+; ZVFHMIN64-NEXT:    sub sp, sp, a2
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN64-NEXT:    lui a2, 8
+; ZVFHMIN64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN64-NEXT:    vmset.m v7
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v8, v16, a2
+; ZVFHMIN64-NEXT:    slli a0, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    sub a4, a1, a0
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v0, v7, a3
+; ZVFHMIN64-NEXT:    sltu a3, a1, a4
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v16, v24, a2
+; ZVFHMIN64-NEXT:    addi a3, a3, -1
+; ZVFHMIN64-NEXT:    and a3, a3, a4
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 4
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v24, v16, v8, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN64-NEXT:    bltu a1, a0, .LBB289_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:  .LBB289_2:
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v0, v24
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v0, v16, v24
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 5
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16, v0.t
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    addi sp, sp, -16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
@@ -9740,24 +9676,25 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    slli a1, a1, 5
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; ZVFHMIN-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    lui a2, 8
+; ZVFHMIN-NEXT:    csrr a3, vlenb
+; ZVFHMIN-NEXT:    vmv.v.x v24, a1
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    mv a2, a1
+; ZVFHMIN-NEXT:    mv a4, a1
 ; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a1, a1, a2
+; ZVFHMIN-NEXT:    add a1, a1, a4
 ; ZVFHMIN-NEXT:    add a1, sp, a1
 ; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vmv.v.x v24, a1
+; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
 ; ZVFHMIN-NEXT:    slli a1, a3, 1
 ; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2, v0.t
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
 ; ZVFHMIN-NEXT:    sub a2, a0, a1
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a3
@@ -9778,7 +9715,7 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
 ; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
 ; ZVFHMIN-NEXT:    mv a3, a2
@@ -9786,34 +9723,39 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    add a2, a2, a3
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v28, v0.t
+; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB294_2
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB290_2
 ; ZVFHMIN-NEXT:  # %bb.1:
 ; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB294_2:
+; ZVFHMIN-NEXT:  .LBB290_2:
 ; ZVFHMIN-NEXT:    vmv1r.v v0, v3
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    add a1, sp, a1
 ; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    add a0, sp, a0
+; ZVFHMIN-NEXT:    addi a0, a0, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    mv a1, a0
@@ -9822,16 +9764,11 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
+; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
@@ -9850,20 +9787,20 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_commute:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16, v0.t
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_commute:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    addi sp, sp, -16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
@@ -9871,24 +9808,25 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
 ; ZVFHMIN-NEXT:    slli a1, a1, 5
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; ZVFHMIN-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    lui a2, 8
+; ZVFHMIN-NEXT:    csrr a3, vlenb
+; ZVFHMIN-NEXT:    vmv.v.x v24, a1
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    mv a2, a1
+; ZVFHMIN-NEXT:    mv a4, a1
 ; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a1, a1, a2
+; ZVFHMIN-NEXT:    add a1, a1, a4
 ; ZVFHMIN-NEXT:    add a1, sp, a1
 ; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vmv.v.x v24, a1
+; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
 ; ZVFHMIN-NEXT:    slli a1, a3, 1
 ; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2, v0.t
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
 ; ZVFHMIN-NEXT:    sub a2, a0, a1
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a3
@@ -9922,34 +9860,29 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
 ; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v8, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB295_2
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16, v0.t
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB291_2
 ; ZVFHMIN-NEXT:  # %bb.1:
 ; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB295_2:
+; ZVFHMIN-NEXT:  .LBB291_2:
 ; ZVFHMIN-NEXT:    vmv1r.v v0, v3
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    add a1, sp, a1
 ; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
 ; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    mv a1, a0
@@ -9957,17 +9890,22 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
+; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    addi a0, sp, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    slli a0, a0, 4
+; ZVFHMIN-NEXT:    add a0, sp, a0
+; ZVFHMIN-NEXT:    addi a0, a0, 16
+; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vmv.v.v v16, v8
 ; ZVFHMIN-NEXT:    vmv4r.v v12, v4
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
@@ -9981,118 +9919,134 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    addi sp, sp, -16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    csrr a1, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    mv a2, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 1
+; ZVFHMIN-NEXT:    slli a1, a1, 2
 ; ZVFHMIN-NEXT:    add a1, a1, a2
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    lui a2, 8
+; ZVFHMIN-NEXT:    sub sp, sp, a1
+; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
+; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    vsetvli a3, zero, e8, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmset.m v7
 ; ZVFHMIN-NEXT:    csrr a3, vlenb
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v24, a1
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2
+; ZVFHMIN-NEXT:    vxor.vx v24, v8, a1
+; ZVFHMIN-NEXT:    vxor.vx v8, v16, a1
 ; ZVFHMIN-NEXT:    slli a1, a3, 1
 ; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2
-; ZVFHMIN-NEXT:    sub a2, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT:    sub a4, a0, a1
+; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v7, a3
-; ZVFHMIN-NEXT:    sltu a3, a0, a2
+; ZVFHMIN-NEXT:    sltu a3, a0, a4
 ; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a2, a3, a2
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    slli a3, a3, 4
-; ZVFHMIN-NEXT:    add a3, sp, a3
-; ZVFHMIN-NEXT:    addi a3, a3, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT:    and a3, a3, a4
+; ZVFHMIN-NEXT:    csrr a4, vlenb
+; ZVFHMIN-NEXT:    slli a4, a4, 3
+; ZVFHMIN-NEXT:    mv a5, a4
+; ZVFHMIN-NEXT:    slli a4, a4, 1
+; ZVFHMIN-NEXT:    add a4, a4, a5
+; ZVFHMIN-NEXT:    add a4, sp, a4
+; ZVFHMIN-NEXT:    addi a4, a4, 16
+; ZVFHMIN-NEXT:    vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT:    csrr a4, vlenb
+; ZVFHMIN-NEXT:    slli a4, a4, 4
+; ZVFHMIN-NEXT:    add a4, sp, a4
+; ZVFHMIN-NEXT:    addi a4, a4, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    csrr a4, vlenb
+; ZVFHMIN-NEXT:    slli a4, a4, 5
+; ZVFHMIN-NEXT:    add a4, sp, a4
+; ZVFHMIN-NEXT:    addi a4, a4, 16
+; ZVFHMIN-NEXT:    vs8r.v v24, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v28, v0.t
+; ZVFHMIN-NEXT:    csrr a4, vlenb
+; ZVFHMIN-NEXT:    slli a4, a4, 3
+; ZVFHMIN-NEXT:    add a4, sp, a4
+; ZVFHMIN-NEXT:    addi a4, a4, 16
+; ZVFHMIN-NEXT:    vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v8, a2
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
+; ZVFHMIN-NEXT:    slli a2, a2, 4
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v24, v16, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB296_2
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB292_2
 ; ZVFHMIN-NEXT:  # %bb.1:
 ; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB296_2:
+; ZVFHMIN-NEXT:  .LBB292_2:
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
+; ZVFHMIN-NEXT:    slli a1, a1, 3
+; ZVFHMIN-NEXT:    mv a2, a1
+; ZVFHMIN-NEXT:    slli a1, a1, 1
+; ZVFHMIN-NEXT:    add a1, a1, a2
 ; ZVFHMIN-NEXT:    add a1, sp, a1
 ; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    slli a0, a0, 4
+; ZVFHMIN-NEXT:    add a0, sp, a0
+; ZVFHMIN-NEXT:    addi a0, a0, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
+; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v24
+; ZVFHMIN-NEXT:    addi a0, sp, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
+; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v0, v24, v16
+; ZVFHMIN-NEXT:    vfmadd.vv v24, v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
+; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    mv a1, a0
+; ZVFHMIN-NEXT:    slli a0, a0, 2
+; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
@@ -10100,20 +10054,20 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute:
 ; ZVFH:       # %bb.0:
 ; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    addi sp, sp, -16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
@@ -10121,80 +10075,74 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; ZVFHMIN-NEXT:    slli a1, a1, 5
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    mv a2, a1
-; ZVFHMIN-NEXT:    slli a1, a1, 1
-; ZVFHMIN-NEXT:    add a1, a1, a2
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    lui a2, 8
+; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
+; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    vsetvli a3, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v7
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a3, vlenb
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v24, a1
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2
+; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT:    vxor.vx v16, v16, a1
 ; ZVFHMIN-NEXT:    slli a1, a3, 1
 ; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2
-; ZVFHMIN-NEXT:    sub a2, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v7, a3
-; ZVFHMIN-NEXT:    sltu a3, a0, a2
+; ZVFHMIN-NEXT:    sub a4, a0, a1
+; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a3
+; ZVFHMIN-NEXT:    sltu a3, a0, a4
 ; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a2, a3, a2
-; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    slli a3, a3, 4
-; ZVFHMIN-NEXT:    add a3, sp, a3
-; ZVFHMIN-NEXT:    addi a3, a3, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    and a3, a3, a4
+; ZVFHMIN-NEXT:    csrr a4, vlenb
+; ZVFHMIN-NEXT:    slli a4, a4, 4
+; ZVFHMIN-NEXT:    add a4, sp, a4
+; ZVFHMIN-NEXT:    addi a4, a4, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT:    csrr a4, vlenb
+; ZVFHMIN-NEXT:    slli a4, a4, 3
+; ZVFHMIN-NEXT:    add a4, sp, a4
+; ZVFHMIN-NEXT:    addi a4, a4, 16
+; ZVFHMIN-NEXT:    vs8r.v v24, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    csrr a4, vlenb
+; ZVFHMIN-NEXT:    slli a4, a4, 3
+; ZVFHMIN-NEXT:    mv a5, a4
+; ZVFHMIN-NEXT:    slli a4, a4, 1
+; ZVFHMIN-NEXT:    add a4, a4, a5
+; ZVFHMIN-NEXT:    add a4, sp, a4
+; ZVFHMIN-NEXT:    addi a4, a4, 16
+; ZVFHMIN-NEXT:    vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v8, a2
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
 ; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v20, v8, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB297_2
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB293_2
 ; ZVFHMIN-NEXT:  # %bb.1:
 ; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB297_2:
+; ZVFHMIN-NEXT:  .LBB293_2:
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a1, 4
 ; ZVFHMIN-NEXT:    add a1, sp, a1
 ; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT:    vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    mv a1, a0
@@ -10202,15 +10150,20 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    add a0, sp, a0
+; ZVFHMIN-NEXT:    addi a0, a0, 16
+; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v0, v24, v8
+; ZVFHMIN-NEXT:    vfmadd.vv v16, v0, v24
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v16, v0
-; ZVFHMIN-NEXT:    vmv8r.v v8, v16
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
@@ -10220,79 +10173,66 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmsub_vv_nxv32f16:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT:    vle16.v v24, (a0)
-; ZVFH-NEXT:    vfnmadd.vv v16, v8, v24, v0.t
-; ZVFH-NEXT:    vmv.v.v v8, v16
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16, v0.t
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a2
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v3, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    addi sp, sp, -16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 5
+; ZVFHMIN-NEXT:    sub sp, sp, a1
+; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 3
+; ZVFHMIN-NEXT:    mv a2, a1
+; ZVFHMIN-NEXT:    slli a1, a1, 1
+; ZVFHMIN-NEXT:    add a1, a1, a2
+; ZVFHMIN-NEXT:    add a1, sp, a1
+; ZVFHMIN-NEXT:    addi a1, a1, 16
+; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
 ; ZVFHMIN-NEXT:    lui a2, 8
 ; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vxor.vx v8, v16, a2, v0.t
-; ZVFHMIN-NEXT:    slli a0, a3, 1
+; ZVFHMIN-NEXT:    vmv.v.x v24, a1
+; ZVFHMIN-NEXT:    slli a1, a3, 1
 ; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a1, a0
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; ZVFHMIN-NEXT:    sltu a3, a1, a4
-; ZVFHMIN-NEXT:    csrr a5, vlenb
-; ZVFHMIN-NEXT:    slli a5, a5, 4
-; ZVFHMIN-NEXT:    add a5, sp, a5
-; ZVFHMIN-NEXT:    addi a5, a5, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v24, v24, a2, v0.t
+; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2, v0.t
+; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN-NEXT:    sub a2, a0, a1
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a3
+; ZVFHMIN-NEXT:    sltu a3, a0, a2
 ; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v28, v0.t
+; ZVFHMIN-NEXT:    and a2, a3, a2
+; ZVFHMIN-NEXT:    csrr a3, vlenb
+; ZVFHMIN-NEXT:    slli a3, a3, 3
+; ZVFHMIN-NEXT:    add a3, sp, a3
+; ZVFHMIN-NEXT:    addi a3, a3, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 4
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
 ; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
 ; ZVFHMIN-NEXT:    mv a3, a2
@@ -10300,30 +10240,39 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    add a2, a2, a3
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v28, v0.t
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a1, a0, .LBB298_2
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16, v0.t
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB294_2
 ; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:  .LBB298_2:
+; ZVFHMIN-NEXT:    mv a0, a1
+; ZVFHMIN-NEXT:  .LBB294_2:
 ; ZVFHMIN-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 3
+; ZVFHMIN-NEXT:    add a1, sp, a1
+; ZVFHMIN-NEXT:    addi a1, a1, 16
+; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    addi a0, sp, 16
+; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    mv a1, a0
+; ZVFHMIN-NEXT:    slli a0, a0, 1
+; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
@@ -10333,27 +10282,19 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    slli a0, a0, 1
-; ZVFHMIN-NEXT:    add a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
+; ZVFHMIN-NEXT:    addi a0, sp, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    addi a0, sp, 16
 ; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT:    vmv.v.v v16, v8
 ; ZVFHMIN-NEXT:    vmv4r.v v12, v4
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
@@ -10361,68 +10302,68 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmsub_vv_nxv32f16_commuted:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT:    vle16.v v24, (a0)
-; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24, v0.t
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16, v0.t
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_commuted:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    addi sp, sp, -16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a2
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 5
+; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZVFHMIN-NEXT:    vmv1r.v v3, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 3
+; ZVFHMIN-NEXT:    mv a2, a1
+; ZVFHMIN-NEXT:    slli a1, a1, 1
+; ZVFHMIN-NEXT:    add a1, a1, a2
+; ZVFHMIN-NEXT:    add a1, sp, a1
+; ZVFHMIN-NEXT:    addi a1, a1, 16
+; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
 ; ZVFHMIN-NEXT:    lui a2, 8
 ; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
-; ZVFHMIN-NEXT:    slli a0, a3, 1
+; ZVFHMIN-NEXT:    vmv.v.x v24, a1
+; ZVFHMIN-NEXT:    slli a1, a3, 1
 ; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a1, a0
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; ZVFHMIN-NEXT:    sltu a3, a1, a4
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2, v0.t
+; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN-NEXT:    sub a2, a0, a1
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a3
+; ZVFHMIN-NEXT:    sltu a3, a0, a2
 ; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN-NEXT:    and a2, a3, a2
+; ZVFHMIN-NEXT:    csrr a3, vlenb
+; ZVFHMIN-NEXT:    slli a3, a3, 3
+; ZVFHMIN-NEXT:    add a3, sp, a3
+; ZVFHMIN-NEXT:    addi a3, a3, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
 ; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 4
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
 ; ZVFHMIN-NEXT:    mv a3, a2
@@ -10430,38 +10371,38 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    add a2, a2, a3
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v28, v0.t
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v16, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a1, a0, .LBB299_2
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v8, v0.t
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB295_2
 ; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:  .LBB299_2:
+; ZVFHMIN-NEXT:    mv a0, a1
+; ZVFHMIN-NEXT:  .LBB295_2:
 ; ZVFHMIN-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 3
+; ZVFHMIN-NEXT:    add a1, sp, a1
+; ZVFHMIN-NEXT:    addi a1, a1, 16
+; ZVFHMIN-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT:    addi a0, sp, 16
+; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 4
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
@@ -10470,15 +10411,15 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v24, v0.t
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
+; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfmadd.vv v8, v16, v24, v0.t
 ; ZVFHMIN-NEXT:    vmv.v.v v16, v8
@@ -10492,69 +10433,69 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT:    vle16.v v24, (a0)
-; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    addi sp, sp, -16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a2
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 5
+; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 3
+; ZVFHMIN-NEXT:    mv a2, a1
+; ZVFHMIN-NEXT:    slli a1, a1, 1
+; ZVFHMIN-NEXT:    add a1, a1, a2
+; ZVFHMIN-NEXT:    add a1, sp, a1
+; ZVFHMIN-NEXT:    addi a1, a1, 16
+; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
 ; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v8
+; ZVFHMIN-NEXT:    vsetvli a3, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v7
 ; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v24, a1
 ; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2
-; ZVFHMIN-NEXT:    slli a0, a3, 1
+; ZVFHMIN-NEXT:    slli a1, a3, 1
 ; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a1, a0
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT:    sltu a3, a1, a4
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2
+; ZVFHMIN-NEXT:    sub a2, a0, a1
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT:    vslidedown.vx v0, v7, a3
+; ZVFHMIN-NEXT:    sltu a3, a0, a2
 ; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
+; ZVFHMIN-NEXT:    and a2, a3, a2
+; ZVFHMIN-NEXT:    csrr a3, vlenb
+; ZVFHMIN-NEXT:    slli a3, a3, 4
+; ZVFHMIN-NEXT:    add a3, sp, a3
+; ZVFHMIN-NEXT:    addi a3, a3, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT:    addi a2, sp, 16
+; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
 ; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
 ; ZVFHMIN-NEXT:    mv a3, a2
@@ -10570,16 +10511,16 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a1, a0, .LBB300_2
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB296_2
 ; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:  .LBB300_2:
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    mv a0, a1
+; ZVFHMIN-NEXT:  .LBB296_2:
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 4
+; ZVFHMIN-NEXT:    add a1, sp, a1
+; ZVFHMIN-NEXT:    addi a1, a1, 16
+; ZVFHMIN-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
 ; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
@@ -10588,7 +10529,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
 ; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    mv a1, a0
@@ -10596,14 +10537,14 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v0
+; ZVFHMIN-NEXT:    vfmadd.vv v0, v24, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
@@ -10611,69 +10552,69 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT:    vle16.v v24, (a0)
-; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vfnmadd.vf v8, fa0, v16
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    addi sp, sp, -16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a2
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 5
+; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 3
-; ZVFHMIN-NEXT:    mv a3, a2
-; ZVFHMIN-NEXT:    slli a2, a2, 1
-; ZVFHMIN-NEXT:    add a2, a2, a3
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 3
+; ZVFHMIN-NEXT:    mv a2, a1
+; ZVFHMIN-NEXT:    slli a1, a1, 1
+; ZVFHMIN-NEXT:    add a1, a1, a2
+; ZVFHMIN-NEXT:    add a1, sp, a1
+; ZVFHMIN-NEXT:    addi a1, a1, 16
+; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
 ; ZVFHMIN-NEXT:    lui a2, 8
-; ZVFHMIN-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v8
+; ZVFHMIN-NEXT:    vsetvli a3, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v7
 ; ZVFHMIN-NEXT:    csrr a3, vlenb
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v24, a1
 ; ZVFHMIN-NEXT:    vxor.vx v16, v16, a2
-; ZVFHMIN-NEXT:    slli a0, a3, 1
+; ZVFHMIN-NEXT:    slli a1, a3, 1
 ; ZVFHMIN-NEXT:    srli a3, a3, 2
-; ZVFHMIN-NEXT:    sub a4, a1, a0
-; ZVFHMIN-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT:    sltu a3, a1, a4
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v8, v24, a2
+; ZVFHMIN-NEXT:    sub a2, a0, a1
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT:    vslidedown.vx v0, v7, a3
+; ZVFHMIN-NEXT:    sltu a3, a0, a2
 ; ZVFHMIN-NEXT:    addi a3, a3, -1
-; ZVFHMIN-NEXT:    and a3, a3, a4
+; ZVFHMIN-NEXT:    and a2, a3, a2
+; ZVFHMIN-NEXT:    csrr a3, vlenb
+; ZVFHMIN-NEXT:    slli a3, a3, 4
+; ZVFHMIN-NEXT:    add a3, sp, a3
+; ZVFHMIN-NEXT:    addi a3, a3, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT:    addi a2, sp, 16
+; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
 ; ZVFHMIN-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    slli a2, a2, 4
-; ZVFHMIN-NEXT:    add a2, sp, a2
-; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a2, a2, 3
 ; ZVFHMIN-NEXT:    mv a3, a2
@@ -10681,33 +10622,33 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; ZVFHMIN-NEXT:    add a2, a2, a3
 ; ZVFHMIN-NEXT:    add a2, sp, a2
 ; ZVFHMIN-NEXT:    addi a2, a2, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
 ; ZVFHMIN-NEXT:    addi a2, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16, v0.t
-; ZVFHMIN-NEXT:    bltu a1, a0, .LBB301_2
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v20, v8, v0.t
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB297_2
 ; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:  .LBB301_2:
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT:    mv a0, a1
+; ZVFHMIN-NEXT:  .LBB297_2:
+; ZVFHMIN-NEXT:    csrr a1, vlenb
+; ZVFHMIN-NEXT:    slli a1, a1, 4
+; ZVFHMIN-NEXT:    add a1, sp, a1
+; ZVFHMIN-NEXT:    addi a1, a1, 16
+; ZVFHMIN-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
 ; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 3
 ; ZVFHMIN-NEXT:    mv a1, a0
@@ -10715,14 +10656,15 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; ZVFHMIN-NEXT:    add a0, a0, a1
 ; ZVFHMIN-NEXT:    add a0, sp, a0
 ; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v8
 ; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT:    vfmadd.vv v0, v24, v8
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v16, v0
+; ZVFHMIN-NEXT:    vmv8r.v v8, v16
 ; ZVFHMIN-NEXT:    csrr a0, vlenb
 ; ZVFHMIN-NEXT:    slli a0, a0, 5
 ; ZVFHMIN-NEXT:    add sp, sp, a0
@@ -10730,6 +10672,972 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
+  %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+  %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmsub_vv_nxv32f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT:    vle16.v v24, (a0)
+; ZVFH-NEXT:    vfnmadd.vv v16, v8, v24, v0.t
+; ZVFH-NEXT:    vmv.v.v v8, v16
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN32-LABEL: vfnmsub_vv_nxv32f16:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 5
+; ZVFHMIN32-NEXT:    sub sp, sp, a2
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    lui a2, 8
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    vxor.vx v8, v16, a2, v0.t
+; ZVFHMIN32-NEXT:    slli a0, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    sub a4, a1, a0
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v6, v0, a3
+; ZVFHMIN32-NEXT:    sltu a3, a1, a4
+; ZVFHMIN32-NEXT:    csrr a5, vlenb
+; ZVFHMIN32-NEXT:    slli a5, a5, 4
+; ZVFHMIN32-NEXT:    add a5, sp, a5
+; ZVFHMIN32-NEXT:    addi a5, a5, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v24, v24, a2, v0.t
+; ZVFHMIN32-NEXT:    addi a3, a3, -1
+; ZVFHMIN32-NEXT:    and a3, a3, a4
+; ZVFHMIN32-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v28, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 4
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v4, v24, v0.t
+; ZVFHMIN32-NEXT:    bltu a1, a0, .LBB298_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:  .LBB298_2:
+; ZVFHMIN32-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT:    vmv4r.v v12, v4
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 5
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: vfnmsub_vv_nxv32f16:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 5
+; ZVFHMIN64-NEXT:    sub sp, sp, a2
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN64-NEXT:    lui a2, 8
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN64-NEXT:    slli a0, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    sub a4, a1, a0
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v6, v0, a3
+; ZVFHMIN64-NEXT:    sltu a3, a1, a4
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN64-NEXT:    addi a3, a3, -1
+; ZVFHMIN64-NEXT:    and a3, a3, a4
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 4
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v4, v24, v0.t
+; ZVFHMIN64-NEXT:    bltu a1, a0, .LBB298_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:  .LBB298_2:
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT:    vmv.v.v v16, v8
+; ZVFHMIN64-NEXT:    vmv4r.v v12, v4
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 5
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmsub_vv_nxv32f16_commuted:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT:    vle16.v v24, (a0)
+; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN32-LABEL: vfnmsub_vv_nxv32f16_commuted:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 5
+; ZVFHMIN32-NEXT:    sub sp, sp, a2
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN32-NEXT:    lui a2, 8
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN32-NEXT:    slli a0, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    sub a4, a1, a0
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v6, v0, a3
+; ZVFHMIN32-NEXT:    sltu a3, a1, a4
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN32-NEXT:    addi a3, a3, -1
+; ZVFHMIN32-NEXT:    and a3, a3, a4
+; ZVFHMIN32-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 4
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v4, v16, v0.t
+; ZVFHMIN32-NEXT:    bltu a1, a0, .LBB299_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:  .LBB299_2:
+; ZVFHMIN32-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN32-NEXT:    vmv.v.v v16, v8
+; ZVFHMIN32-NEXT:    vmv4r.v v12, v4
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 5
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: vfnmsub_vv_nxv32f16_commuted:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 5
+; ZVFHMIN64-NEXT:    sub sp, sp, a2
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vmv1r.v v3, v0
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vle16.v v8, (a0)
+; ZVFHMIN64-NEXT:    lui a2, 8
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN64-NEXT:    slli a0, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    sub a4, a1, a0
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v6, v0, a3
+; ZVFHMIN64-NEXT:    sltu a3, a1, a4
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN64-NEXT:    addi a3, a3, -1
+; ZVFHMIN64-NEXT:    and a3, a3, a4
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 4
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v4, v8, v0.t
+; ZVFHMIN64-NEXT:    bltu a1, a0, .LBB299_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:  .LBB299_2:
+; ZVFHMIN64-NEXT:    vmv1r.v v0, v3
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT:    vmv.v.v v16, v8
+; ZVFHMIN64-NEXT:    vmv4r.v v12, v4
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 5
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT:    vle16.v v24, (a0)
+; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN32-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 5
+; ZVFHMIN32-NEXT:    sub sp, sp, a2
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN32-NEXT:    lui a2, 8
+; ZVFHMIN32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN32-NEXT:    vmset.m v8
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v16, v16, a2
+; ZVFHMIN32-NEXT:    slli a0, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    sub a4, a1, a0
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT:    sltu a3, a1, a4
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v8, v24, a2
+; ZVFHMIN32-NEXT:    addi a3, a3, -1
+; ZVFHMIN32-NEXT:    and a3, a3, a4
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 4
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN32-NEXT:    bltu a1, a0, .LBB300_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:  .LBB300_2:
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v0, v16
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v16, v24, v0
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 5
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 5
+; ZVFHMIN64-NEXT:    sub sp, sp, a2
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN64-NEXT:    lui a2, 8
+; ZVFHMIN64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN64-NEXT:    vmset.m v8
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v16, v16, a2
+; ZVFHMIN64-NEXT:    slli a0, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    sub a4, a1, a0
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v0, v8, a3
+; ZVFHMIN64-NEXT:    sltu a3, a1, a4
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v8, v24, a2
+; ZVFHMIN64-NEXT:    addi a3, a3, -1
+; ZVFHMIN64-NEXT:    and a3, a3, a4
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 4
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN64-NEXT:    bltu a1, a0, .LBB300_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:  .LBB300_2:
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v0, v24
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v0, v16, v24
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 5
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT:    vle16.v v24, (a0)
+; ZVFH-NEXT:    vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN32-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN32:       # %bb.0:
+; ZVFHMIN32-NEXT:    addi sp, sp, -16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 5
+; ZVFHMIN32-NEXT:    sub sp, sp, a2
+; ZVFHMIN32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN32-NEXT:    lui a2, 8
+; ZVFHMIN32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN32-NEXT:    vmset.m v8
+; ZVFHMIN32-NEXT:    csrr a3, vlenb
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v16, v16, a2
+; ZVFHMIN32-NEXT:    slli a0, a3, 1
+; ZVFHMIN32-NEXT:    srli a3, a3, 2
+; ZVFHMIN32-NEXT:    sub a4, a1, a0
+; ZVFHMIN32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT:    vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT:    sltu a3, a1, a4
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT:    vxor.vx v8, v24, a2
+; ZVFHMIN32-NEXT:    addi a3, a3, -1
+; ZVFHMIN32-NEXT:    and a3, a3, a4
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 4
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT:    csrr a2, vlenb
+; ZVFHMIN32-NEXT:    slli a2, a2, 3
+; ZVFHMIN32-NEXT:    mv a3, a2
+; ZVFHMIN32-NEXT:    slli a2, a2, 1
+; ZVFHMIN32-NEXT:    add a2, a2, a3
+; ZVFHMIN32-NEXT:    add a2, sp, a2
+; ZVFHMIN32-NEXT:    addi a2, a2, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN32-NEXT:    addi a2, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v12, v16, v0.t
+; ZVFHMIN32-NEXT:    bltu a1, a0, .LBB301_2
+; ZVFHMIN32-NEXT:  # %bb.1:
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:  .LBB301_2:
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 4
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 3
+; ZVFHMIN32-NEXT:    mv a1, a0
+; ZVFHMIN32-NEXT:    slli a0, a0, 1
+; ZVFHMIN32-NEXT:    add a0, a0, a1
+; ZVFHMIN32-NEXT:    add a0, sp, a0
+; ZVFHMIN32-NEXT:    addi a0, a0, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vfwcvt.f.f.v v0, v16
+; ZVFHMIN32-NEXT:    addi a0, sp, 16
+; ZVFHMIN32-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT:    vfmadd.vv v0, v16, v24
+; ZVFHMIN32-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFHMIN32-NEXT:    csrr a0, vlenb
+; ZVFHMIN32-NEXT:    slli a0, a0, 5
+; ZVFHMIN32-NEXT:    add sp, sp, a0
+; ZVFHMIN32-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT:    addi sp, sp, 16
+; ZVFHMIN32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT:    ret
+;
+; ZVFHMIN64-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN64:       # %bb.0:
+; ZVFHMIN64-NEXT:    addi sp, sp, -16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 5
+; ZVFHMIN64-NEXT:    sub sp, sp, a2
+; ZVFHMIN64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vle16.v v24, (a0)
+; ZVFHMIN64-NEXT:    lui a2, 8
+; ZVFHMIN64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN64-NEXT:    vmset.m v7
+; ZVFHMIN64-NEXT:    csrr a3, vlenb
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v8, v16, a2
+; ZVFHMIN64-NEXT:    slli a0, a3, 1
+; ZVFHMIN64-NEXT:    srli a3, a3, 2
+; ZVFHMIN64-NEXT:    sub a4, a1, a0
+; ZVFHMIN64-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT:    vslidedown.vx v0, v7, a3
+; ZVFHMIN64-NEXT:    sltu a3, a1, a4
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT:    vxor.vx v16, v24, a2
+; ZVFHMIN64-NEXT:    addi a3, a3, -1
+; ZVFHMIN64-NEXT:    and a3, a3, a4
+; ZVFHMIN64-NEXT:    addi a2, sp, 16
+; ZVFHMIN64-NEXT:    vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 4
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    mv a3, a2
+; ZVFHMIN64-NEXT:    slli a2, a2, 1
+; ZVFHMIN64-NEXT:    add a2, a2, a3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT:    csrr a2, vlenb
+; ZVFHMIN64-NEXT:    slli a2, a2, 3
+; ZVFHMIN64-NEXT:    add a2, sp, a2
+; ZVFHMIN64-NEXT:    addi a2, a2, 16
+; ZVFHMIN64-NEXT:    vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v24, v16, v8, v0.t
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN64-NEXT:    bltu a1, a0, .LBB301_2
+; ZVFHMIN64-NEXT:  # %bb.1:
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:  .LBB301_2:
+; ZVFHMIN64-NEXT:    addi a0, sp, 16
+; ZVFHMIN64-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 4
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    mv a1, a0
+; ZVFHMIN64-NEXT:    slli a0, a0, 1
+; ZVFHMIN64-NEXT:    add a0, a0, a1
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vfwcvt.f.f.v v0, v24
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 3
+; ZVFHMIN64-NEXT:    add a0, sp, a0
+; ZVFHMIN64-NEXT:    addi a0, a0, 16
+; ZVFHMIN64-NEXT:    vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT:    vfmadd.vv v0, v16, v24
+; ZVFHMIN64-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFHMIN64-NEXT:    csrr a0, vlenb
+; ZVFHMIN64-NEXT:    slli a0, a0, 5
+; ZVFHMIN64-NEXT:    add sp, sp, a0
+; ZVFHMIN64-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT:    addi sp, sp, 16
+; ZVFHMIN64-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT:    ret
   %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
index b83ddce61f44d..1f53b29b726aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
@@ -475,9 +475,10 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vs
 ; CHECK-NEXT:    addi a7, a7, -1
 ; CHECK-NEXT:    add a5, a0, a1
 ; CHECK-NEXT:    mv a6, a2
+; CHECK-NEXT:    mv t0, a2
 ; CHECK-NEXT:    bltu a2, a7, .LBB22_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a6, a7
+; CHECK-NEXT:    mv t0, a7
 ; CHECK-NEXT:  .LBB22_2:
 ; CHECK-NEXT:    addi sp, sp, -80
 ; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -488,31 +489,30 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vs
 ; CHECK-NEXT:    sub sp, sp, a7
 ; CHECK-NEXT:    andi sp, sp, -64
 ; CHECK-NEXT:    vl8re64.v v24, (a5)
-; CHECK-NEXT:    slli a5, a6, 3
-; CHECK-NEXT:    addi a6, sp, 64
-; CHECK-NEXT:    add a5, a6, a5
-; CHECK-NEXT:    mv a7, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB22_4
+; CHECK-NEXT:    slli a5, t0, 3
+; CHECK-NEXT:    addi a7, sp, 64
+; CHECK-NEXT:    add a5, a7, a5
+; CHECK-NEXT:    bltu a6, a4, .LBB22_4
 ; CHECK-NEXT:  # %bb.3:
-; CHECK-NEXT:    mv a7, a4
+; CHECK-NEXT:    mv a6, a4
 ; CHECK-NEXT:  .LBB22_4:
 ; CHECK-NEXT:    vl8re64.v v0, (a0)
-; CHECK-NEXT:    vsetvli zero, a7, e64, m8, ta, ma
-; CHECK-NEXT:    vse64.v v8, (a6)
+; CHECK-NEXT:    vsetvli zero, a6, e64, m8, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a7)
 ; CHECK-NEXT:    sub a0, a2, a4
-; CHECK-NEXT:    add a6, a6, a1
-; CHECK-NEXT:    sub a7, a3, a4
+; CHECK-NEXT:    add a7, a7, a1
+; CHECK-NEXT:    sub a6, a3, a4
 ; CHECK-NEXT:    sltu a2, a2, a0
 ; CHECK-NEXT:    addi a2, a2, -1
 ; CHECK-NEXT:    and a2, a2, a0
-; CHECK-NEXT:    sltu a0, a3, a7
+; CHECK-NEXT:    sltu a0, a3, a6
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    and a0, a0, a7
-; CHECK-NEXT:    add a7, a5, a1
+; CHECK-NEXT:    and a0, a0, a6
+; CHECK-NEXT:    add a6, a5, a1
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vse64.v v16, (a6)
+; CHECK-NEXT:    vse64.v v16, (a7)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vse64.v v24, (a7)
+; CHECK-NEXT:    vse64.v v24, (a6)
 ; CHECK-NEXT:    bltu a3, a4, .LBB22_6
 ; CHECK-NEXT:  # %bb.5:
 ; CHECK-NEXT:    mv a3, a4
@@ -539,48 +539,48 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a4, vlenb
 ; CHECK-NEXT:    slli a1, a4, 3
-; CHECK-NEXT:    slli a7, a4, 1
-; CHECK-NEXT:    addi a7, a7, -1
+; CHECK-NEXT:    slli a6, a4, 1
+; CHECK-NEXT:    addi a6, a6, -1
 ; CHECK-NEXT:    add a5, a0, a1
-; CHECK-NEXT:    mv a6, a2
-; CHECK-NEXT:    bltu a2, a7, .LBB23_2
+; CHECK-NEXT:    mv a7, a2
+; CHECK-NEXT:    mv t0, a2
+; CHECK-NEXT:    bltu a2, a6, .LBB23_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a6, a7
+; CHECK-NEXT:    mv t0, a6
 ; CHECK-NEXT:  .LBB23_2:
 ; CHECK-NEXT:    addi sp, sp, -80
 ; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    addi s0, sp, 80
-; CHECK-NEXT:    csrr a7, vlenb
-; CHECK-NEXT:    slli a7, a7, 5
-; CHECK-NEXT:    sub sp, sp, a7
+; CHECK-NEXT:    csrr a6, vlenb
+; CHECK-NEXT:    slli a6, a6, 5
+; CHECK-NEXT:    sub sp, sp, a6
 ; CHECK-NEXT:    andi sp, sp, -64
 ; CHECK-NEXT:    vl8re64.v v24, (a5)
-; CHECK-NEXT:    slli a5, a6, 3
-; CHECK-NEXT:    addi a7, sp, 64
-; CHECK-NEXT:    add a6, a7, a5
-; CHECK-NEXT:    mv t0, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB23_4
+; CHECK-NEXT:    slli a5, t0, 3
+; CHECK-NEXT:    addi t0, sp, 64
+; CHECK-NEXT:    add a6, t0, a5
+; CHECK-NEXT:    bltu a7, a4, .LBB23_4
 ; CHECK-NEXT:  # %bb.3:
-; CHECK-NEXT:    mv t0, a4
+; CHECK-NEXT:    mv a7, a4
 ; CHECK-NEXT:  .LBB23_4:
 ; CHECK-NEXT:    vl8re64.v v0, (a0)
-; CHECK-NEXT:    vsetvli zero, t0, e64, m8, ta, ma
-; CHECK-NEXT:    vse64.v v8, (a7)
+; CHECK-NEXT:    vsetvli zero, a7, e64, m8, ta, ma
+; CHECK-NEXT:    vse64.v v8, (t0)
 ; CHECK-NEXT:    sub a0, a2, a4
-; CHECK-NEXT:    add a7, a7, a1
-; CHECK-NEXT:    sub t0, a3, a4
+; CHECK-NEXT:    add t0, t0, a1
+; CHECK-NEXT:    sub a7, a3, a4
 ; CHECK-NEXT:    sltu a2, a2, a0
 ; CHECK-NEXT:    addi a2, a2, -1
 ; CHECK-NEXT:    and a2, a2, a0
-; CHECK-NEXT:    sltu a0, a3, t0
+; CHECK-NEXT:    sltu a0, a3, a7
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    and a0, a0, t0
-; CHECK-NEXT:    add t0, a6, a1
+; CHECK-NEXT:    and a0, a0, a7
+; CHECK-NEXT:    add a7, a6, a1
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vse64.v v16, (a7)
+; CHECK-NEXT:    vse64.v v16, (t0)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vse64.v v24, (t0)
+; CHECK-NEXT:    vse64.v v24, (a7)
 ; CHECK-NEXT:    bltu a3, a4, .LBB23_6
 ; CHECK-NEXT:  # %bb.5:
 ; CHECK-NEXT:    mv a3, a4
diff --git a/llvm/test/CodeGen/X86/bmi-select-distrib.ll b/llvm/test/CodeGen/X86/bmi-select-distrib.ll
index e5696ded4fbf1..021b9747795ef 100644
--- a/llvm/test/CodeGen/X86/bmi-select-distrib.ll
+++ b/llvm/test/CodeGen/X86/bmi-select-distrib.ll
@@ -75,14 +75,14 @@ define i64 @and_select_neg_to_blsi_i64(i1 %a0, i64 %a1) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    xorl %edx, %edx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    negl %eax
-; X86-NEXT:    sbbl %esi, %edx
-; X86-NEXT:    andl %esi, %edx
-; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    andl %ecx, %edx
+; X86-NEXT:    andl %esi, %eax
 ; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
-; X86-NEXT:    cmovel %esi, %edx
-; X86-NEXT:    cmovel %ecx, %eax
+; X86-NEXT:    cmovel %ecx, %edx
+; X86-NEXT:    cmovel %esi, %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
@@ -399,15 +399,15 @@ define i64 @and_select_sub_1_to_blsr_i64(i1 %a0, i64 %a1) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    addl $-1, %eax
-; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    adcl $-1, %edx
-; X86-NEXT:    andl %esi, %edx
-; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    andl %ecx, %edx
+; X86-NEXT:    andl %esi, %eax
 ; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
-; X86-NEXT:    cmovel %ecx, %eax
-; X86-NEXT:    cmovel %esi, %edx
+; X86-NEXT:    cmovel %esi, %eax
+; X86-NEXT:    cmovel %ecx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
@@ -691,15 +691,15 @@ define i64 @xor_select_sub_1_to_blsmsk_i64(i1 %a0, i64 %a1) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    addl $-1, %eax
-; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    adcl $-1, %edx
-; X86-NEXT:    xorl %esi, %edx
-; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    xorl %ecx, %edx
+; X86-NEXT:    xorl %esi, %eax
 ; X86-NEXT:    testb $1, {{[0-9]+}}(%esp)
-; X86-NEXT:    cmovel %ecx, %eax
-; X86-NEXT:    cmovel %esi, %edx
+; X86-NEXT:    cmovel %esi, %eax
+; X86-NEXT:    cmovel %ecx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
index d151c6f28e51b..4751c9ce222ef 100644
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -1009,19 +1009,18 @@ define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
 ; X86AVX2-NEXT:    pushl %esi
 ; X86AVX2-NEXT:    andl $-16, %esp
 ; X86AVX2-NEXT:    subl $48, %esp
-; X86AVX2-NEXT:    movl 8(%ebp), %edx
-; X86AVX2-NEXT:    movl 12(%ebp), %eax
-; X86AVX2-NEXT:    movl 16(%ebp), %ecx
+; X86AVX2-NEXT:    movl 16(%ebp), %eax
+; X86AVX2-NEXT:    movl 8(%ebp), %ecx
+; X86AVX2-NEXT:    movl 12(%ebp), %edx
 ; X86AVX2-NEXT:    vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT:    addl %ecx, %ecx
-; X86AVX2-NEXT:    movl %ecx, %esi
-; X86AVX2-NEXT:    andl $3, %esi
-; X86AVX2-NEXT:    movl %edx, (%esp,%esi,4)
+; X86AVX2-NEXT:    leal (%eax,%eax), %esi
+; X86AVX2-NEXT:    andl $2, %esi
+; X86AVX2-NEXT:    movl %ecx, (%esp,%esi,4)
 ; X86AVX2-NEXT:    vmovaps (%esp), %xmm0
 ; X86AVX2-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT:    incl %ecx
-; X86AVX2-NEXT:    andl $3, %ecx
-; X86AVX2-NEXT:    movl %eax, 16(%esp,%ecx,4)
+; X86AVX2-NEXT:    leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT:    andl $3, %eax
+; X86AVX2-NEXT:    movl %edx, 16(%esp,%eax,4)
 ; X86AVX2-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0
 ; X86AVX2-NEXT:    leal -4(%ebp), %esp
 ; X86AVX2-NEXT:    popl %esi
@@ -1363,13 +1362,12 @@ define <2 x i64> @load_i64_v2i64(<2 x i64> %v, ptr %p, i32 %y) nounwind {
 ; X86AVX2-NEXT:    movl (%ecx), %edx
 ; X86AVX2-NEXT:    movl 4(%ecx), %ecx
 ; X86AVX2-NEXT:    vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT:    addl %eax, %eax
-; X86AVX2-NEXT:    movl %eax, %esi
-; X86AVX2-NEXT:    andl $3, %esi
+; X86AVX2-NEXT:    leal (%eax,%eax), %esi
+; X86AVX2-NEXT:    andl $2, %esi
 ; X86AVX2-NEXT:    movl %edx, (%esp,%esi,4)
 ; X86AVX2-NEXT:    vmovaps (%esp), %xmm0
 ; X86AVX2-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT:    incl %eax
+; X86AVX2-NEXT:    leal 1(%eax,%eax), %eax
 ; X86AVX2-NEXT:    andl $3, %eax
 ; X86AVX2-NEXT:    movl %ecx, 16(%esp,%eax,4)
 ; X86AVX2-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0
@@ -1744,19 +1742,18 @@ define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
 ; X86AVX2-NEXT:    pushl %esi
 ; X86AVX2-NEXT:    andl $-32, %esp
 ; X86AVX2-NEXT:    subl $96, %esp
-; X86AVX2-NEXT:    movl 8(%ebp), %edx
-; X86AVX2-NEXT:    movl 12(%ebp), %eax
-; X86AVX2-NEXT:    movl 16(%ebp), %ecx
+; X86AVX2-NEXT:    movl 16(%ebp), %eax
+; X86AVX2-NEXT:    movl 8(%ebp), %ecx
+; X86AVX2-NEXT:    movl 12(%ebp), %edx
 ; X86AVX2-NEXT:    vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT:    addl %ecx, %ecx
-; X86AVX2-NEXT:    movl %ecx, %esi
-; X86AVX2-NEXT:    andl $7, %esi
-; X86AVX2-NEXT:    movl %edx, (%esp,%esi,4)
+; X86AVX2-NEXT:    leal (%eax,%eax), %esi
+; X86AVX2-NEXT:    andl $6, %esi
+; X86AVX2-NEXT:    movl %ecx, (%esp,%esi,4)
 ; X86AVX2-NEXT:    vmovaps (%esp), %ymm0
 ; X86AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT:    incl %ecx
-; X86AVX2-NEXT:    andl $7, %ecx
-; X86AVX2-NEXT:    movl %eax, 32(%esp,%ecx,4)
+; X86AVX2-NEXT:    leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT:    andl $7, %eax
+; X86AVX2-NEXT:    movl %edx, 32(%esp,%eax,4)
 ; X86AVX2-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0
 ; X86AVX2-NEXT:    leal -4(%ebp), %esp
 ; X86AVX2-NEXT:    popl %esi
@@ -2131,13 +2128,12 @@ define <4 x i64> @load_i64_v4i64(<4 x i64> %v, ptr %p, i32 %y) nounwind {
 ; X86AVX2-NEXT:    movl (%ecx), %edx
 ; X86AVX2-NEXT:    movl 4(%ecx), %ecx
 ; X86AVX2-NEXT:    vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT:    addl %eax, %eax
-; X86AVX2-NEXT:    movl %eax, %esi
-; X86AVX2-NEXT:    andl $7, %esi
+; X86AVX2-NEXT:    leal (%eax,%eax), %esi
+; X86AVX2-NEXT:    andl $6, %esi
 ; X86AVX2-NEXT:    movl %edx, (%esp,%esi,4)
 ; X86AVX2-NEXT:    vmovaps (%esp), %ymm0
 ; X86AVX2-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT:    incl %eax
+; X86AVX2-NEXT:    leal 1(%eax,%eax), %eax
 ; X86AVX2-NEXT:    andl $7, %eax
 ; X86AVX2-NEXT:    movl %ecx, 32(%esp,%eax,4)
 ; X86AVX2-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
index cd2b398784d33..f28da02ca8964 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
@@ -159,21 +159,21 @@ define <8 x i32> @vec256_i32_unsigned_reg_reg(<8 x i32> %a1, <8 x i32> %a2) noun
 define <8 x i32> @vec256_i32_signed_mem_reg(ptr %a1_addr, <8 x i32> %a2) nounwind {
 ; AVX1-LABEL: vec256_i32_signed_mem_reg:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm4
-; AVX1-NEXT:    vpsubd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX1-NEXT:    vpminsd %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm3
+; AVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; AVX1-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpminsd %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vpmaxsd %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT:    vpmulld %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -190,20 +190,20 @@ define <8 x i32> @vec256_i32_signed_mem_reg(ptr %a1_addr, <8 x i32> %a2) nounwin
 ;
 ; XOP-LABEL: vec256_i32_signed_mem_reg:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
-; XOP-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
-; XOP-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOP-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; XOP-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vmovdqa (%rdi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpminsd %xmm0, %xmm1, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
 ; XOP-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
 ; XOP-NEXT:    vpsrld $1, %xmm0, %xmm0
-; XOP-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOP-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
-; XOP-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm1, %xmm0, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: vec256_i32_signed_mem_reg:
@@ -303,23 +303,23 @@ define <8 x i32> @vec256_i32_signed_reg_mem(<8 x i32> %a1, ptr %a2_addr) nounwin
 define <8 x i32> @vec256_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; AVX1-LABEL: vec256_i32_signed_mem_mem:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rsi), %ymm0
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm4
-; AVX1-NEXT:    vpsubd %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; AVX1-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT:    vpmulld %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX1-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i32_signed_mem_mem:
@@ -336,20 +336,20 @@ define <8 x i32> @vec256_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ;
 ; XOP-LABEL: vec256_i32_signed_mem_mem:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vmovdqa (%rsi), %ymm0
-; XOP-NEXT:    vmovdqa (%rdi), %xmm1
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-NEXT:    vmovdqa (%rdi), %ymm0
+; XOP-NEXT:    vmovdqa (%rsi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-NEXT:    vpminsd %xmm3, %xmm2, %xmm4
-; XOP-NEXT:    vpmaxsd %xmm3, %xmm2, %xmm3
-; XOP-NEXT:    vpsubd %xmm4, %xmm3, %xmm3
-; XOP-NEXT:    vpminsd %xmm0, %xmm1, %xmm4
-; XOP-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
-; XOP-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
-; XOP-NEXT:    vpsrld $1, %xmm0, %xmm0
-; XOP-NEXT:    vpsrld $1, %xmm3, %xmm3
-; XOP-NEXT:    vpmacsdd %xmm2, %xmm3, %xmm3, %xmm2
-; XOP-NEXT:    vpmacsdd %xmm1, %xmm0, %xmm0, %xmm0
+; XOP-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpminsd %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
 ; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
@@ -727,18 +727,18 @@ define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) noun
 define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwind {
 ; AVX1-LABEL: vec256_i64_signed_mem_reg:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm5
-; AVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm5
+; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsubq %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vpsubq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm6
+; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsrlq $1, %xmm2, %xmm6
 ; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm7
 ; AVX1-NEXT:    vpsrlq $33, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxbq {{.*#+}} xmm8 = [1,1]
@@ -749,19 +749,19 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
 ; AVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm5
-; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $33, %xmm2, %xmm2
 ; AVX1-NEXT:    vpor %xmm4, %xmm8, %xmm7
-; AVX1-NEXT:    vpmuludq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm4
 ; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
 ; AVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm4
 ; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_mem_reg:
@@ -787,18 +787,18 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
 ;
 ; XOP-LABEL: vec256_i64_signed_mem_reg:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtq %xmm1, %xmm3, %xmm4
-; XOP-NEXT:    vpcomgtq %xmm0, %xmm2, %xmm5
-; XOP-NEXT:    vpsubq %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vmovdqa (%rdi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtq %xmm0, %xmm1, %xmm5
+; XOP-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
 ; XOP-NEXT:    vpxor %xmm5, %xmm0, %xmm0
 ; XOP-NEXT:    vpsubq %xmm0, %xmm5, %xmm0
-; XOP-NEXT:    vpsubq %xmm1, %xmm3, %xmm1
-; XOP-NEXT:    vpxor %xmm4, %xmm1, %xmm1
-; XOP-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
-; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm6
+; XOP-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsubq %xmm2, %xmm4, %xmm2
+; XOP-NEXT:    vpsrlq $1, %xmm2, %xmm6
 ; XOP-NEXT:    vpsrlq $1, %xmm0, %xmm7
 ; XOP-NEXT:    vpsrlq $33, %xmm0, %xmm0
 ; XOP-NEXT:    vpmovsxbq {{.*#+}} xmm8 = [1,1]
@@ -809,19 +809,19 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
 ; XOP-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
 ; XOP-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; XOP-NEXT:    vpmuludq %xmm7, %xmm9, %xmm5
-; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $33, %xmm2, %xmm2
 ; XOP-NEXT:    vpor %xmm4, %xmm8, %xmm7
-; XOP-NEXT:    vpmuludq %xmm7, %xmm1, %xmm1
+; XOP-NEXT:    vpmuludq %xmm7, %xmm2, %xmm2
 ; XOP-NEXT:    vpsrlq $32, %xmm4, %xmm4
 ; XOP-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
 ; XOP-NEXT:    vpmuludq %xmm7, %xmm6, %xmm4
 ; XOP-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOP-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; XOP-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOP-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; XOP-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_mem_reg:
@@ -897,27 +897,27 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
 define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwind {
 ; AVX1-LABEL: vec256_i64_signed_reg_mem:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
-; AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm2
-; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vpsubq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm3
 ; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpsubq %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT:    vpsrlq $1, %xmm3, %xmm6
-; AVX1-NEXT:    vpsrlq $1, %xmm2, %xmm7
-; AVX1-NEXT:    vpsrlq $33, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
 ; AVX1-NEXT:    vpmovsxbq {{.*#+}} xmm8 = [1,1]
 ; AVX1-NEXT:    vpor %xmm5, %xmm8, %xmm9
-; AVX1-NEXT:    vpmuludq %xmm2, %xmm9, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm9, %xmm1
 ; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm5
 ; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; AVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm5
 ; AVX1-NEXT:    vpsrlq $33, %xmm3, %xmm3
 ; AVX1-NEXT:    vpor %xmm4, %xmm8, %xmm7
@@ -927,11 +927,11 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 ; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT:    vpsllq $32, %xmm3, %xmm3
 ; AVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm4
-; AVX1-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_reg_mem:
@@ -957,27 +957,27 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 ;
 ; XOP-LABEL: vec256_i64_signed_reg_mem:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtq %xmm3, %xmm1, %xmm4
-; XOP-NEXT:    vpcomgtq %xmm2, %xmm0, %xmm5
-; XOP-NEXT:    vpsubq %xmm2, %xmm0, %xmm2
-; XOP-NEXT:    vpxor %xmm5, %xmm2, %xmm2
-; XOP-NEXT:    vpsubq %xmm2, %xmm5, %xmm2
-; XOP-NEXT:    vpsubq %xmm3, %xmm1, %xmm3
+; XOP-NEXT:    vmovdqa (%rdi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT:    vpcomgtq %xmm3, %xmm2, %xmm4
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpxor %xmm5, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm1, %xmm5, %xmm1
+; XOP-NEXT:    vpsubq %xmm3, %xmm2, %xmm3
 ; XOP-NEXT:    vpxor %xmm4, %xmm3, %xmm3
 ; XOP-NEXT:    vpsubq %xmm3, %xmm4, %xmm3
 ; XOP-NEXT:    vpsrlq $1, %xmm3, %xmm6
-; XOP-NEXT:    vpsrlq $1, %xmm2, %xmm7
-; XOP-NEXT:    vpsrlq $33, %xmm2, %xmm2
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
 ; XOP-NEXT:    vpmovsxbq {{.*#+}} xmm8 = [1,1]
 ; XOP-NEXT:    vpor %xmm5, %xmm8, %xmm9
-; XOP-NEXT:    vpmuludq %xmm2, %xmm9, %xmm2
+; XOP-NEXT:    vpmuludq %xmm1, %xmm9, %xmm1
 ; XOP-NEXT:    vpsrlq $32, %xmm5, %xmm5
 ; XOP-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; XOP-NEXT:    vpmuludq %xmm7, %xmm9, %xmm5
 ; XOP-NEXT:    vpsrlq $33, %xmm3, %xmm3
 ; XOP-NEXT:    vpor %xmm4, %xmm8, %xmm7
@@ -987,11 +987,11 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 ; XOP-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
 ; XOP-NEXT:    vpsllq $32, %xmm3, %xmm3
 ; XOP-NEXT:    vpmuludq %xmm7, %xmm6, %xmm4
-; XOP-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; XOP-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; XOP-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; XOP-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; XOP-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_reg_mem:
@@ -1067,42 +1067,42 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; AVX1-LABEL: vec256_i64_signed_mem_mem:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm5
-; AVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm0
-; AVX1-NEXT:    vpsubq %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vpsubq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm7
-; AVX1-NEXT:    vpsrlq $33, %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX1-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsrlq $1, %xmm2, %xmm6
+; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
 ; AVX1-NEXT:    vpmovsxbq {{.*#+}} xmm8 = [1,1]
 ; AVX1-NEXT:    vpor %xmm5, %xmm8, %xmm9
-; AVX1-NEXT:    vpmuludq %xmm0, %xmm9, %xmm0
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm9, %xmm1
 ; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm5
 ; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; AVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm5
-; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $33, %xmm2, %xmm2
 ; AVX1-NEXT:    vpor %xmm4, %xmm8, %xmm7
-; AVX1-NEXT:    vpmuludq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm4
 ; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
 ; AVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm4
 ; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_mem_mem:
@@ -1129,42 +1129,42 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ;
 ; XOP-LABEL: vec256_i64_signed_mem_mem:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vmovdqa (%rsi), %xmm0
-; XOP-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtq %xmm1, %xmm3, %xmm4
-; XOP-NEXT:    vpcomgtq %xmm0, %xmm2, %xmm5
-; XOP-NEXT:    vpsubq %xmm0, %xmm2, %xmm0
-; XOP-NEXT:    vpxor %xmm5, %xmm0, %xmm0
-; XOP-NEXT:    vpsubq %xmm0, %xmm5, %xmm0
-; XOP-NEXT:    vpsubq %xmm1, %xmm3, %xmm1
-; XOP-NEXT:    vpxor %xmm4, %xmm1, %xmm1
-; XOP-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
-; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; XOP-NEXT:    vpsrlq $1, %xmm0, %xmm7
-; XOP-NEXT:    vpsrlq $33, %xmm0, %xmm0
+; XOP-NEXT:    vmovdqa (%rdi), %ymm0
+; XOP-NEXT:    vmovdqa (%rsi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpxor %xmm5, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm1, %xmm5, %xmm1
+; XOP-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsubq %xmm2, %xmm4, %xmm2
+; XOP-NEXT:    vpsrlq $1, %xmm2, %xmm6
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
 ; XOP-NEXT:    vpmovsxbq {{.*#+}} xmm8 = [1,1]
 ; XOP-NEXT:    vpor %xmm5, %xmm8, %xmm9
-; XOP-NEXT:    vpmuludq %xmm0, %xmm9, %xmm0
+; XOP-NEXT:    vpmuludq %xmm1, %xmm9, %xmm1
 ; XOP-NEXT:    vpsrlq $32, %xmm5, %xmm5
 ; XOP-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; XOP-NEXT:    vpsllq $32, %xmm0, %xmm0
+; XOP-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; XOP-NEXT:    vpmuludq %xmm7, %xmm9, %xmm5
-; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $33, %xmm2, %xmm2
 ; XOP-NEXT:    vpor %xmm4, %xmm8, %xmm7
-; XOP-NEXT:    vpmuludq %xmm7, %xmm1, %xmm1
+; XOP-NEXT:    vpmuludq %xmm7, %xmm2, %xmm2
 ; XOP-NEXT:    vpsrlq $32, %xmm4, %xmm4
 ; XOP-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
 ; XOP-NEXT:    vpmuludq %xmm7, %xmm6, %xmm4
 ; XOP-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOP-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; XOP-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOP-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; XOP-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_mem_mem:
@@ -1499,27 +1499,27 @@ define <16 x i16> @vec256_i16_unsigned_reg_reg(<16 x i16> %a1, <16 x i16> %a2) n
 define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounwind {
 ; AVX1-LABEL: vec256_i16_signed_mem_reg:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm5
-; AVX1-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; AVX1-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm5
+; AVX1-NEXT:    vpminsw %xmm0, %xmm1, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; AVX1-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; AVX1-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
 ; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i16_signed_mem_reg:
@@ -1537,25 +1537,25 @@ define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounw
 ;
 ; XOP-LABEL: vec256_i16_signed_mem_reg:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
-; XOP-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
-; XOP-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; XOP-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
-; XOP-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOP-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; XOP-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vmovdqa (%rdi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtw %xmm0, %xmm1, %xmm5
+; XOP-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm0, %xmm1, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
 ; XOP-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
 ; XOP-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm2, %xmm2
 ; XOP-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
 ; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOP-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsww %xmm1, %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i16_signed_mem_reg:
@@ -1627,27 +1627,27 @@ define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounw
 define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounwind {
 ; AVX1-LABEL: vec256_i16_signed_reg_mem:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm5
-; AVX1-NEXT:    vpminsw %xmm2, %xmm0, %xmm6
-; AVX1-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm2
-; AVX1-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; AVX1-NEXT:    vpminsw %xmm3, %xmm1, %xmm6
-; AVX1-NEXT:    vpmaxsw %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpminsw %xmm3, %xmm2, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm3, %xmm2, %xmm3
 ; AVX1-NEXT:    vpsubw %xmm6, %xmm3, %xmm3
 ; AVX1-NEXT:    vpsrlw $1, %xmm3, %xmm3
-; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
 ; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpmullw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
 ; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
 ; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpaddw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i16_signed_reg_mem:
@@ -1665,25 +1665,25 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounw
 ;
 ; XOP-LABEL: vec256_i16_signed_reg_mem:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtw %xmm3, %xmm1, %xmm4
-; XOP-NEXT:    vpcomgtw %xmm2, %xmm0, %xmm5
-; XOP-NEXT:    vpminsw %xmm3, %xmm1, %xmm6
-; XOP-NEXT:    vpmaxsw %xmm3, %xmm1, %xmm3
+; XOP-NEXT:    vmovdqa (%rdi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT:    vpcomgtw %xmm3, %xmm2, %xmm4
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminsw %xmm3, %xmm2, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm3, %xmm2, %xmm3
 ; XOP-NEXT:    vpsubw %xmm6, %xmm3, %xmm3
-; XOP-NEXT:    vpminsw %xmm2, %xmm0, %xmm6
-; XOP-NEXT:    vpmaxsw %xmm2, %xmm0, %xmm2
-; XOP-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; XOP-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
 ; XOP-NEXT:    vpsrlw $1, %xmm3, %xmm3
 ; XOP-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
 ; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOP-NEXT:    vpmacsww %xmm1, %xmm4, %xmm3, %xmm1
-; XOP-NEXT:    vpmacsww %xmm0, %xmm5, %xmm2, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpmacsww %xmm2, %xmm4, %xmm3, %xmm2
+; XOP-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i16_signed_reg_mem:
@@ -1755,28 +1755,28 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounw
 define <16 x i16> @vec256_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; AVX1-LABEL: vec256_i16_signed_mem_mem:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm5
-; AVX1-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; AVX1-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; AVX1-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; AVX1-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX1-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
 ; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
 ; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i16_signed_mem_mem:
@@ -1795,26 +1795,26 @@ define <16 x i16> @vec256_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwin
 ;
 ; XOP-LABEL: vec256_i16_signed_mem_mem:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vmovdqa (%rsi), %xmm0
-; XOP-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
-; XOP-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
-; XOP-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; XOP-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %ymm0
+; XOP-NEXT:    vmovdqa (%rsi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
 ; XOP-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOP-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; XOP-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
-; XOP-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; XOP-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm2, %xmm2
 ; XOP-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
 ; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOP-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i16_signed_mem_mem:
@@ -2235,9 +2235,9 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
 define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind {
 ; AVX1-LABEL: vec256_i8_signed_mem_reg:
 ; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm3, %xmm4
 ; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm5
 ; AVX1-NEXT:    vpminsb %xmm0, %xmm1, %xmm6
@@ -2294,19 +2294,19 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
 ;
 ; XOP-LABEL: vec256_i8_signed_mem_reg:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtb %xmm1, %xmm3, %xmm4
-; XOP-NEXT:    vpcomgtb %xmm0, %xmm2, %xmm5
-; XOP-NEXT:    vpminsb %xmm0, %xmm2, %xmm6
-; XOP-NEXT:    vpmaxsb %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vmovdqa (%rdi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm5
+; XOP-NEXT:    vpminsb %xmm0, %xmm1, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
 ; XOP-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
-; XOP-NEXT:    vpminsb %xmm1, %xmm3, %xmm6
-; XOP-NEXT:    vpmaxsb %xmm1, %xmm3, %xmm1
-; XOP-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
 ; XOP-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOP-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
 ; XOP-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
 ; XOP-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
@@ -2318,12 +2318,12 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
 ; XOP-NEXT:    vpperm %xmm5, %xmm8, %xmm0, %xmm0
 ; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
 ; XOP-NEXT:    vpandn %xmm4, %xmm7, %xmm6
-; XOP-NEXT:    vpmaddubsw %xmm6, %xmm1, %xmm6
-; XOP-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
-; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; XOP-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpmaddubsw %xmm6, %xmm2, %xmm6
+; XOP-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i8_signed_mem_reg:
@@ -2407,9 +2407,9 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
 define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind {
 ; AVX1-LABEL: vec256_i8_signed_reg_mem:
 ; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
 ; AVX1-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm4
 ; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm5
 ; AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm6
@@ -2466,36 +2466,36 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
 ;
 ; XOP-LABEL: vec256_i8_signed_reg_mem:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtb %xmm3, %xmm1, %xmm4
-; XOP-NEXT:    vpcomgtb %xmm2, %xmm0, %xmm5
-; XOP-NEXT:    vpminsb %xmm2, %xmm0, %xmm6
-; XOP-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm2
-; XOP-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOP-NEXT:    vpminsb %xmm3, %xmm1, %xmm6
-; XOP-NEXT:    vpmaxsb %xmm3, %xmm1, %xmm3
+; XOP-NEXT:    vmovdqa (%rdi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT:    vpcomgtb %xmm3, %xmm2, %xmm4
+; XOP-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpminsb %xmm3, %xmm2, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm3, %xmm2, %xmm3
 ; XOP-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
 ; XOP-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
 ; XOP-NEXT:    vpshlb %xmm6, %xmm3, %xmm3
-; XOP-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
 ; XOP-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; XOP-NEXT:    vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
 ; XOP-NEXT:    vpandn %xmm5, %xmm7, %xmm8
-; XOP-NEXT:    vpmaddubsw %xmm8, %xmm2, %xmm8
-; XOP-NEXT:    vpmullw %xmm5, %xmm2, %xmm2
+; XOP-NEXT:    vpmaddubsw %xmm8, %xmm1, %xmm8
+; XOP-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
 ; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
-; XOP-NEXT:    vpperm %xmm5, %xmm8, %xmm2, %xmm2
+; XOP-NEXT:    vpperm %xmm5, %xmm8, %xmm1, %xmm1
 ; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
 ; XOP-NEXT:    vpandn %xmm4, %xmm7, %xmm6
 ; XOP-NEXT:    vpmaddubsw %xmm6, %xmm3, %xmm6
 ; XOP-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
 ; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm3, %xmm3
-; XOP-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; XOP-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i8_signed_reg_mem:
@@ -2579,42 +2579,42 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
 define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; AVX1-LABEL: vec256_i8_signed_mem_mem:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm3, %xmm4
-; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm2, %xmm5
-; AVX1-NEXT:    vpminsb %xmm0, %xmm2, %xmm6
-; AVX1-NEXT:    vpmaxsb %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
-; AVX1-NEXT:    vpminsb %xmm1, %xmm3, %xmm6
-; AVX1-NEXT:    vpmaxsb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX1-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT:    vpand %xmm6, %xmm0, %xmm0
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm7
+; AVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm7
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
 ; AVX1-NEXT:    vpand %xmm7, %xmm8, %xmm7
 ; AVX1-NEXT:    vpandn %xmm5, %xmm8, %xmm5
-; AVX1-NEXT:    vpmaddubsw %xmm5, %xmm0, %xmm0
-; AVX1-NEXT:    vpsllw $8, %xmm0, %xmm0
-; AVX1-NEXT:    vpor %xmm0, %xmm7, %xmm0
+; AVX1-NEXT:    vpmaddubsw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $8, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm7, %xmm1
 ; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpmullw %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm5
 ; AVX1-NEXT:    vpand %xmm5, %xmm8, %xmm5
 ; AVX1-NEXT:    vpandn %xmm4, %xmm8, %xmm4
-; AVX1-NEXT:    vpmaddubsw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpsllw $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpor %xmm1, %xmm5, %xmm1
-; AVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpmaddubsw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $8, %xmm2, %xmm2
+; AVX1-NEXT:    vpor %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i8_signed_mem_mem:
@@ -2640,37 +2640,37 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ;
 ; XOP-LABEL: vec256_i8_signed_mem_mem:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vmovdqa (%rsi), %xmm0
-; XOP-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOP-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT:    vpcomgtb %xmm1, %xmm3, %xmm4
-; XOP-NEXT:    vpcomgtb %xmm0, %xmm2, %xmm5
-; XOP-NEXT:    vpminsb %xmm0, %xmm2, %xmm6
-; XOP-NEXT:    vpmaxsb %xmm0, %xmm2, %xmm0
-; XOP-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
-; XOP-NEXT:    vpminsb %xmm1, %xmm3, %xmm6
-; XOP-NEXT:    vpmaxsb %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %ymm0
+; XOP-NEXT:    vmovdqa (%rsi), %ymm1
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
 ; XOP-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
 ; XOP-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
 ; XOP-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOP-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
 ; XOP-NEXT:    vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; XOP-NEXT:    vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
 ; XOP-NEXT:    vpandn %xmm5, %xmm7, %xmm8
-; XOP-NEXT:    vpmaddubsw %xmm8, %xmm0, %xmm8
-; XOP-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vpmaddubsw %xmm8, %xmm1, %xmm8
+; XOP-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
 ; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
-; XOP-NEXT:    vpperm %xmm5, %xmm8, %xmm0, %xmm0
+; XOP-NEXT:    vpperm %xmm5, %xmm8, %xmm1, %xmm1
 ; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
 ; XOP-NEXT:    vpandn %xmm4, %xmm7, %xmm6
-; XOP-NEXT:    vpmaddubsw %xmm6, %xmm1, %xmm6
-; XOP-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
-; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; XOP-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    vpmaddubsw %xmm6, %xmm2, %xmm6
+; XOP-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i8_signed_mem_mem:
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
index 7eb3d4fbdbbfa..64ff0014d6cb7 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
@@ -426,58 +426,58 @@ define <32 x i16> @vec512_i16_unsigned_reg_reg(<32 x i16> %a1, <32 x i16> %a2) n
 define <32 x i16> @vec512_i16_signed_mem_reg(ptr %a1_addr, <32 x i16> %a2) nounwind {
 ; AVX512F-LABEL: vec512_i16_signed_mem_reg:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm4
-; AVX512F-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT:    vpcmpgtw %ymm0, %ymm1, %ymm5
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT:    vpminsw %ymm1, %ymm3, %ymm5
-; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vpminsw %ymm0, %ymm2, %ymm5
-; AVX512F-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpminsw %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT:    vpmaxsw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsubw %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsw %ymm0, %ymm1, %ymm5
+; AVX512F-NEXT:    vpmaxsw %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    vpsubw %ymm5, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm5
 ; AVX512F-NEXT:    vpxor %xmm6, %xmm6, %xmm6
-; AVX512F-NEXT:    vpsubw %ymm1, %ymm6, %ymm1
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
 ; AVX512F-NEXT:    vpsubw %ymm0, %ymm6, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT:    vpaddw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: vec512_i16_signed_mem_reg:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512VL-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm4
-; AVX512VL-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512VL-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm4
+; AVX512VL-NEXT:    vpcmpgtw %ymm0, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-NEXT:    vpminsw %ymm1, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
-; AVX512VL-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpminsw %ymm0, %ymm2, %ymm5
-; AVX512VL-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpminsw %ymm2, %ymm3, %ymm5
+; AVX512VL-NEXT:    vpmaxsw %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsubw %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpminsw %ymm0, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpmaxsw %ymm0, %ymm1, %ymm0
 ; AVX512VL-NEXT:    vpsubw %ymm5, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512VL-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm5
 ; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
-; AVX512VL-NEXT:    vpsubw %ymm1, %ymm6, %ymm1
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
 ; AVX512VL-NEXT:    vpsubw %ymm0, %ymm6, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-NEXT:    vpaddw %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: vec512_i16_signed_mem_reg:
@@ -507,58 +507,58 @@ define <32 x i16> @vec512_i16_signed_mem_reg(ptr %a1_addr, <32 x i16> %a2) nounw
 define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, ptr %a2_addr) nounwind {
 ; AVX512F-LABEL: vec512_i16_signed_reg_mem:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm4
-; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm5
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT:    vpcmpgtw %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm5
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT:    vpminsw %ymm3, %ymm1, %ymm5
-; AVX512F-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpminsw %ymm3, %ymm2, %ymm5
+; AVX512F-NEXT:    vpmaxsw %ymm3, %ymm2, %ymm3
 ; AVX512F-NEXT:    vpsubw %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT:    vpminsw %ymm2, %ymm0, %ymm5
-; AVX512F-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm2
-; AVX512F-NEXT:    vpsubw %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm5
 ; AVX512F-NEXT:    vpxor %xmm6, %xmm6, %xmm6
 ; AVX512F-NEXT:    vpsubw %ymm3, %ymm6, %ymm3
-; AVX512F-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5))
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512F-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpsubw %ymm1, %ymm6, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT:    vpaddw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: vec512_i16_signed_reg_mem:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512VL-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm4
-; AVX512VL-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm5
+; AVX512VL-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT:    vpcmpgtw %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm5
 ; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-NEXT:    vpminsw %ymm3, %ymm1, %ymm5
-; AVX512VL-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpminsw %ymm3, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpmaxsw %ymm3, %ymm2, %ymm3
 ; AVX512VL-NEXT:    vpsubw %ymm5, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpminsw %ymm2, %ymm0, %ymm5
-; AVX512VL-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpsubw %ymm5, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpminsw %ymm1, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm3, %ymm3
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm5
 ; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
 ; AVX512VL-NEXT:    vpsubw %ymm3, %ymm6, %ymm3
-; AVX512VL-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5))
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512VL-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
-; AVX512VL-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpsubw %ymm1, %ymm6, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT:    vpaddw %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: vec512_i16_signed_reg_mem:
@@ -588,60 +588,60 @@ define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, ptr %a2_addr) nounw
 define <32 x i16> @vec512_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; AVX512F-LABEL: vec512_i16_signed_mem_mem:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm4
-; AVX512F-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm5
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT:    vpminsw %ymm1, %ymm3, %ymm5
-; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpminsw %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT:    vpmaxsw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsubw %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
 ; AVX512F-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vpminsw %ymm0, %ymm2, %ymm5
-; AVX512F-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT:    vpsubw %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm5
 ; AVX512F-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
 ; AVX512F-NEXT:    vpsubw %ymm1, %ymm6, %ymm1
-; AVX512F-NEXT:    vpsubw %ymm0, %ymm6, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT:    vpaddw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: vec512_i16_signed_mem_mem:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vmovdqa (%rsi), %ymm0
-; AVX512VL-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512VL-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm4
-; AVX512VL-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512VL-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512VL-NEXT:    vmovdqa64 (%rsi), %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm4
+; AVX512VL-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm5
 ; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-NEXT:    vpminsw %ymm1, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpminsw %ymm2, %ymm3, %ymm5
+; AVX512VL-NEXT:    vpmaxsw %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsubw %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpminsw %ymm1, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
 ; AVX512VL-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpminsw %ymm0, %ymm2, %ymm5
-; AVX512VL-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
-; AVX512VL-NEXT:    vpsubw %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512VL-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm5
 ; AVX512VL-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-NEXT:    vpsubw %ymm2, %ymm6, %ymm2
 ; AVX512VL-NEXT:    vpsubw %ymm1, %ymm6, %ymm1
-; AVX512VL-NEXT:    vpsubw %ymm0, %ymm6, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-NEXT:    vpaddw %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: vec512_i16_signed_mem_mem:
@@ -849,66 +849,66 @@ define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounw
 define <64 x i8> @vec512_i8_signed_mem_reg(ptr %a1_addr, <64 x i8> %a2) nounwind {
 ; AVX512F-LABEL: vec512_i8_signed_mem_reg:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm4
-; AVX512F-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm5
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT:    vpminsb %ymm1, %ymm3, %ymm5
-; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vpminsb %ymm0, %ymm2, %ymm5
-; AVX512F-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpminsb %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT:    vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsubb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsb %ymm0, %ymm1, %ymm5
+; AVX512F-NEXT:    vpmaxsb %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    vpsubb %ymm5, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm5
 ; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX512F-NEXT:    vpandq %zmm6, %zmm5, %zmm5
-; AVX512F-NEXT:    vpand %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
-; AVX512F-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
+; AVX512F-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
 ; AVX512F-NEXT:    vpand %ymm6, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpsubb %ymm0, %ymm7, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: vec512_i8_signed_mem_reg:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512VL-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm4
-; AVX512VL-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512VL-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512VL-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm5
 ; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-NEXT:    vpminsb %ymm1, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
-; AVX512VL-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpminsb %ymm0, %ymm2, %ymm5
-; AVX512VL-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpminsb %ymm2, %ymm3, %ymm5
+; AVX512VL-NEXT:    vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsubb %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpminsb %ymm0, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpmaxsb %ymm0, %ymm1, %ymm0
 ; AVX512VL-NEXT:    vpsubb %ymm5, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512VL-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm5
 ; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX512VL-NEXT:    vpandq %zmm6, %zmm5, %zmm5
-; AVX512VL-NEXT:    vpand %ymm6, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpxor %xmm7, %xmm7, %xmm7
-; AVX512VL-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
+; AVX512VL-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
 ; AVX512VL-NEXT:    vpand %ymm6, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpsubb %ymm0, %ymm7, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: vec512_i8_signed_mem_reg:
@@ -939,66 +939,66 @@ define <64 x i8> @vec512_i8_signed_mem_reg(ptr %a1_addr, <64 x i8> %a2) nounwind
 define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, ptr %a2_addr) nounwind {
 ; AVX512F-LABEL: vec512_i8_signed_reg_mem:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm5
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm5
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT:    vpminsb %ymm3, %ymm1, %ymm5
-; AVX512F-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpminsb %ymm3, %ymm2, %ymm5
+; AVX512F-NEXT:    vpmaxsb %ymm3, %ymm2, %ymm3
 ; AVX512F-NEXT:    vpsubb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT:    vpminsb %ymm2, %ymm0, %ymm5
-; AVX512F-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm2
-; AVX512F-NEXT:    vpsubb %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm5
 ; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX512F-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
 ; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512F-NEXT:    vpsubb %ymm3, %ymm7, %ymm3
-; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5))
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpand %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: vec512_i8_signed_reg_mem:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512VL-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512VL-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm5
+; AVX512VL-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT:    vpcmpgtb %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm5
 ; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-NEXT:    vpminsb %ymm3, %ymm1, %ymm5
-; AVX512VL-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512VL-NEXT:    vpminsb %ymm3, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpmaxsb %ymm3, %ymm2, %ymm3
 ; AVX512VL-NEXT:    vpsubb %ymm5, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpminsb %ymm2, %ymm0, %ymm5
-; AVX512VL-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpsubb %ymm5, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpminsb %ymm1, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm3, %ymm3
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm5
 ; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX512VL-NEXT:    vpandq %zmm6, %zmm5, %zmm5
 ; AVX512VL-NEXT:    vpand %ymm6, %ymm3, %ymm3
 ; AVX512VL-NEXT:    vpxor %xmm7, %xmm7, %xmm7
 ; AVX512VL-NEXT:    vpsubb %ymm3, %ymm7, %ymm3
-; AVX512VL-NEXT:    vpand %ymm6, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5))
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpand %ymm6, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: vec512_i8_signed_reg_mem:
@@ -1029,68 +1029,68 @@ define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, ptr %a2_addr) nounwind
 define <64 x i8> @vec512_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; AVX512F-LABEL: vec512_i8_signed_mem_mem:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm4
-; AVX512F-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm5
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT:    vpminsb %ymm1, %ymm3, %ymm5
-; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpminsb %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT:    vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpsubb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
 ; AVX512F-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vpminsb %ymm0, %ymm2, %ymm5
-; AVX512F-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT:    vpsubb %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm5
 ; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX512F-NEXT:    vpandq %zmm6, %zmm5, %zmm5
-; AVX512F-NEXT:    vpand %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX512F-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
+; AVX512F-NEXT:    vpand %ymm6, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
-; AVX512F-NEXT:    vpand %ymm6, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsubb %ymm0, %ymm7, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: vec512_i8_signed_mem_mem:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vmovdqa (%rsi), %ymm0
-; AVX512VL-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512VL-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512VL-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm4
-; AVX512VL-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512VL-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512VL-NEXT:    vmovdqa64 (%rsi), %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512VL-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm5
 ; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-NEXT:    vpminsb %ymm1, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpminsb %ymm2, %ymm3, %ymm5
+; AVX512VL-NEXT:    vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsubb %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpminsb %ymm1, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
 ; AVX512VL-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpminsb %ymm0, %ymm2, %ymm5
-; AVX512VL-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
-; AVX512VL-NEXT:    vpsubb %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512VL-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm5
 ; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
 ; AVX512VL-NEXT:    vpandq %zmm6, %zmm5, %zmm5
-; AVX512VL-NEXT:    vpand %ymm6, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm6, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX512VL-NEXT:    vpsubb %ymm2, %ymm7, %ymm2
+; AVX512VL-NEXT:    vpand %ymm6, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpsubb %ymm1, %ymm7, %ymm1
-; AVX512VL-NEXT:    vpand %ymm6, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsubb %ymm0, %ymm7, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: vec512_i8_signed_mem_mem:
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index 73e70e45a00bb..a56e22d9f1f3f 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -592,18 +592,18 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ; AVX512F-NEXT:    vpand %ymm5, %ymm6, %ymm5
 ; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
 ; AVX512F-NEXT:    vpblendvb %ymm4, %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
 ; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
 ; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
 ; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
@@ -626,18 +626,18 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ; AVX512VL-NEXT:    vpand %ymm5, %ymm6, %ymm5
 ; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm5, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
 ; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-fshl-512.ll b/llvm/test/CodeGen/X86/vector-fshl-512.ll
index c3930409c274d..9d35202209533 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-512.ll
@@ -258,21 +258,21 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt)
 ; AVX512F-NEXT:    vpaddb %ymm7, %ymm7, %ymm6
 ; AVX512F-NEXT:    vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm5
-; AVX512F-NEXT:    vpsllw $4, %ymm5, %ymm6
-; AVX512F-NEXT:    vpand %ymm4, %ymm6, %ymm6
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
+; AVX512F-NEXT:    vpsllw $4, %ymm4, %ymm5
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT:    vpand %ymm6, %ymm5, %ymm5
 ; AVX512F-NEXT:    vpsllw $5, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm6
-; AVX512F-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
 ; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
 ; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm3
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm5
-; AVX512F-NEXT:    vpand %ymm4, %ymm5, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512F-NEXT:    vpand %ymm6, %ymm4, %ymm4
 ; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
@@ -326,21 +326,21 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %amt)
 ; AVX512VL-NEXT:    vpaddb %ymm7, %ymm7, %ymm6
 ; AVX512VL-NEXT:    vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm5
-; AVX512VL-NEXT:    vpsllw $4, %ymm5, %ymm6
-; AVX512VL-NEXT:    vpand %ymm4, %ymm6, %ymm6
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
+; AVX512VL-NEXT:    vpsllw $4, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT:    vpand %ymm6, %ymm5, %ymm5
 ; AVX512VL-NEXT:    vpsllw $5, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
-; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm6
-; AVX512VL-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
 ; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm5
-; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm6
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
 ; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm6, %ymm5, %ymm3
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm5
-; AVX512VL-NEXT:    vpand %ymm4, %ymm5, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpand %ymm6, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
index f98a2af374c07..51b92bbd5dc59 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -464,9 +464,9 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
 ;
 ; AVX512VL-LABEL: var_funnnel_v32i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm3 = ymm2 ^ (m32bcst & (ymm3 ^ ymm2))
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm3 = ymm3 ^ (m32bcst & (ymm3 ^ ymm2))
 ; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm2
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
index d53fb452b8a35..b6aa49873e99d 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
@@ -174,44 +174,44 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
 ;
 ; AVX512VL-LABEL: var_funnnel_v64i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [4042322160,4042322160,4042322160,4042322160,4042322160,4042322160,4042322160,4042322160]
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
-; AVX512VL-NEXT:    vpsrlw $4, %ymm3, %ymm4
-; AVX512VL-NEXT:    vpsllw $4, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm5 = ymm4 ^ (ymm2 & (ymm5 ^ ymm4))
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm4
-; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm5, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsrlw $6, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm6
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-NEXT:    vpsrlw $4, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpsllw $4, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [4042322160,4042322160,4042322160,4042322160,4042322160,4042322160,4042322160,4042322160]
+; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm4 = ymm3 ^ (ymm5 & (ymm4 ^ ymm3))
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT:    vpsllw $5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $6, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
 ; AVX512VL-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
 ; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm5 & ymm7)
-; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsrlw $7, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm6
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm4 & ymm7)
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $7, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
 ; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm5 & ymm8)
-; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm5
-; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm5 = ymm4 ^ (ymm2 & (ymm5 ^ ymm4))
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm4 & ymm8)
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm4 = ymm3 ^ (ymm5 & (ymm4 ^ ymm3))
 ; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm3
 ; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm2 & ymm7)
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm3 & ymm7)
 ; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm3
 ; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm2 & ymm8)
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm3 & ymm8)
 ; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: var_funnnel_v64i8:
diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
index f805c45ed15d3..7bb5f1e629f31 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -796,110 +796,114 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
 ;
 ; SSE41-LABEL: var_funnnel_v16i8:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa %xmm0, %xmm3
-; SSE41-NEXT:    paddb %xmm0, %xmm3
-; SSE41-NEXT:    movdqa %xmm3, %xmm5
-; SSE41-NEXT:    psllw $4, %xmm5
-; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
-; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pandn %xmm4, %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pand %xmm5, %xmm0
 ; SSE41-NEXT:    psllw $5, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm3
-; SSE41-NEXT:    movdqa %xmm3, %xmm5
-; SSE41-NEXT:    paddb %xmm3, %xmm5
-; SSE41-NEXT:    paddb %xmm5, %xmm5
-; SSE41-NEXT:    paddb %xmm0, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm3
-; SSE41-NEXT:    movdqa %xmm3, %xmm5
-; SSE41-NEXT:    paddb %xmm3, %xmm5
-; SSE41-NEXT:    paddb %xmm0, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm3
-; SSE41-NEXT:    pand %xmm2, %xmm4
-; SSE41-NEXT:    psllw $5, %xmm4
-; SSE41-NEXT:    movdqa %xmm4, %xmm2
-; SSE41-NEXT:    paddb %xmm4, %xmm2
-; SSE41-NEXT:    movdqa %xmm1, %xmm5
-; SSE41-NEXT:    psrlw $4, %xmm5
-; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    paddb %xmm0, %xmm4
+; SSE41-NEXT:    movdqa %xmm1, %xmm6
+; SSE41-NEXT:    psrlw $4, %xmm6
+; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; SSE41-NEXT:    pblendvb %xmm0, %xmm6, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm6
+; SSE41-NEXT:    psrlw $2, %xmm6
+; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
 ; SSE41-NEXT:    movdqa %xmm4, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm4
-; SSE41-NEXT:    psrlw $2, %xmm4
-; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm1
-; SSE41-NEXT:    movdqa %xmm1, %xmm4
-; SSE41-NEXT:    psrlw $1, %xmm4
-; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE41-NEXT:    pblendvb %xmm0, %xmm6, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm6
+; SSE41-NEXT:    psrlw $1, %xmm6
+; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; SSE41-NEXT:    paddb %xmm4, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm6, %xmm1
+; SSE41-NEXT:    pandn %xmm5, %xmm3
+; SSE41-NEXT:    psllw $5, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    paddb %xmm3, %xmm4
 ; SSE41-NEXT:    paddb %xmm2, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm1
-; SSE41-NEXT:    por %xmm1, %xmm3
+; SSE41-NEXT:    movdqa %xmm2, %xmm5
+; SSE41-NEXT:    psllw $4, %xmm5
+; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    paddb %xmm2, %xmm3
+; SSE41-NEXT:    paddb %xmm3, %xmm3
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    paddb %xmm2, %xmm3
+; SSE41-NEXT:    paddb %xmm4, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    por %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: var_funnnel_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX1-NEXT:    vpandn %xmm4, %xmm2, %xmm5
-; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
-; AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpsllw $5, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm6
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6
+; AVX1-NEXT:    vpblendvb %xmm4, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $2, %xmm1, %xmm4
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm4
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
 ; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpandn %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllw $5, %xmm2, %xmm2
 ; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
-; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm4
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm4
 ; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
-; AVX1-NEXT:    vpblendvb %xmm2, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $2, %xmm1, %xmm2
-; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT:    vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm2
-; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
 ; AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_funnnel_v16i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vpsllw $4, %xmm0, %xmm3
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX2-NEXT:    vpandn %xmm4, %xmm2, %xmm5
-; AVX2-NEXT:    vpsllw $5, %xmm5, %xmm5
-; AVX2-NEXT:    vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
-; AVX2-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
-; AVX2-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; AVX2-NEXT:    vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX2-NEXT:    vpand %xmm3, %xmm2, %xmm4
+; AVX2-NEXT:    vpsllw $5, %xmm4, %xmm4
+; AVX2-NEXT:    vpaddb %xmm4, %xmm4, %xmm5
+; AVX2-NEXT:    vpsrlw $4, %xmm1, %xmm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6
+; AVX2-NEXT:    vpblendvb %xmm4, %xmm6, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw $2, %xmm1, %xmm4
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
+; AVX2-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw $1, %xmm1, %xmm4
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
 ; AVX2-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; AVX2-NEXT:    vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
+; AVX2-NEXT:    vpandn %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsllw $5, %xmm2, %xmm2
 ; AVX2-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
-; AVX2-NEXT:    vpsrlw $4, %xmm1, %xmm4
+; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw $4, %xmm0, %xmm4
 ; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
-; AVX2-NEXT:    vpblendvb %xmm2, %xmm4, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrlw $2, %xmm1, %xmm2
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT:    vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrlw $1, %xmm1, %xmm2
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX2-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
 ; AVX2-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
-; AVX2-NEXT:    vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
index 566311e5e8b39..425441b738399 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -577,97 +577,97 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ;
 ; AVX2-LABEL: var_funnnel_v32i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
-; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm3
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX2-NEXT:    vpandn %ymm4, %ymm2, %ymm5
-; AVX2-NEXT:    vpsllw $5, %ymm5, %ymm5
-; AVX2-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
-; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX2-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
-; AVX2-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX2-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX2-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
+; AVX2-NEXT:    vpblendvb %ymm4, %ymm6, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $2, %ymm1, %ymm4
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm4
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
 ; AVX2-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
-; AVX2-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX2-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpandn %ymm3, %ymm2, %ymm2
 ; AVX2-NEXT:    vpsllw $5, %ymm2, %ymm2
 ; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
-; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm4
+; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm4
 ; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; AVX2-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrlw $2, %ymm1, %ymm2
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm2
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
 ; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: var_funnnel_v32i8:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm3
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512F-NEXT:    vpandn %ymm4, %ymm2, %ymm5
-; AVX512F-NEXT:    vpsllw $5, %ymm5, %ymm5
-; AVX512F-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
-; AVX512F-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm6
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
+; AVX512F-NEXT:    vpblendvb %ymm4, %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $2, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm4
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
 ; AVX512F-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
-; AVX512F-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpandn %ymm3, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
-; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
 ; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $2, %ymm1, %ymm2
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm2
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
 ; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512VL-LABEL: var_funnnel_v32i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512VL-NEXT:    vpandn %ymm4, %ymm2, %ymm5
-; AVX512VL-NEXT:    vpsllw $5, %ymm5, %ymm5
-; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
-; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm6
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $2, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm4, %ymm4
 ; AVX512VL-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
-; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpandn %ymm3, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
-; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $2, %ymm1, %ymm2
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm2
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
 ; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
index ec336f289be6a..fe4ddb3d2fe14 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -551,16 +551,15 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
 ; SSE41-LABEL: var_funnnel_v16i8:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    pxor %xmm0, %xmm0
-; SSE41-NEXT:    psubb %xmm1, %xmm0
-; SSE41-NEXT:    psllw $5, %xmm0
-; SSE41-NEXT:    movdqa %xmm2, %xmm1
-; SSE41-NEXT:    psrlw $4, %xmm1
-; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT:    psrlw $4, %xmm0
+; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE41-NEXT:    movdqa %xmm2, %xmm3
 ; SSE41-NEXT:    psllw $4, %xmm3
 ; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE41-NEXT:    por %xmm1, %xmm3
+; SSE41-NEXT:    por %xmm0, %xmm3
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    psubb %xmm1, %xmm0
+; SSE41-NEXT:    psllw $5, %xmm0
 ; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
 ; SSE41-NEXT:    movdqa %xmm2, %xmm1
 ; SSE41-NEXT:    psrlw $6, %xmm1
@@ -584,14 +583,14 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
 ;
 ; AVX-LABEL: var_funnnel_v16i8:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpsubb %xmm1, %xmm2, %xmm1
-; AVX-NEXT:    vpsllw $5, %xmm1, %xmm1
 ; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm2
 ; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
 ; AVX-NEXT:    vpsllw $4, %xmm0, %xmm3
 ; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
 ; AVX-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpsubb %xmm1, %xmm3, %xmm1
+; AVX-NEXT:    vpsllw $5, %xmm1, %xmm1
 ; AVX-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vpsrlw $6, %xmm0, %xmm2
 ; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
index 84c1ef3d37200..df09f68bda17d 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -441,14 +441,14 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
 ;
 ; AVX2-LABEL: var_funnnel_v32i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpsubb %ymm1, %ymm2, %ymm1
-; AVX2-NEXT:    vpsllw $5, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm2
 ; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
 ; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm3
 ; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
 ; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpsubb %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpsllw $5, %ymm1, %ymm1
 ; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpsrlw $6, %ymm0, %ymm2
 ; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 10ab3dcd239a9..6924a14664837 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -408,9 +408,9 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
 ;
 ; AVX512VL-LABEL: var_rotate_v32i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm3
-; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm3 = ymm2 ^ (m32bcst & (ymm3 ^ ymm2))
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm3 = ymm3 ^ (m32bcst & (ymm3 ^ ymm2))
 ; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm2
diff --git a/llvm/test/CodeGen/X86/vector-rotate-512.ll b/llvm/test/CodeGen/X86/vector-rotate-512.ll
index 0b9c7deb797fa..cea29414fe808 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-512.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-512.ll
@@ -178,44 +178,44 @@ define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
 ;
 ; AVX512VL-LABEL: var_rotate_v64i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [4042322160,4042322160,4042322160,4042322160,4042322160,4042322160,4042322160,4042322160]
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
-; AVX512VL-NEXT:    vpsrlw $4, %ymm3, %ymm4
-; AVX512VL-NEXT:    vpsllw $4, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm5 = ymm4 ^ (ymm2 & (ymm5 ^ ymm4))
-; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm4
-; AVX512VL-NEXT:    vpsllw $5, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm5, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsrlw $6, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm6
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-NEXT:    vpsrlw $4, %ymm2, %ymm3
+; AVX512VL-NEXT:    vpsllw $4, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [4042322160,4042322160,4042322160,4042322160,4042322160,4042322160,4042322160,4042322160]
+; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm4 = ymm3 ^ (ymm5 & (ymm4 ^ ymm3))
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-NEXT:    vpsllw $5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $6, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
 ; AVX512VL-NEXT:    vpaddb %ymm6, %ymm6, %ymm6
 ; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm7 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm5 & ymm7)
-; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsrlw $7, %ymm3, %ymm5
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm6
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm4 & ymm7)
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $7, %ymm2, %ymm4
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
 ; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm5 & ymm8)
-; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm5
-; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm5 = ymm4 ^ (ymm2 & (ymm5 ^ ymm4))
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm4 & ymm8)
+; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpternlogd {{.*#+}} ymm4 = ymm3 ^ (ymm5 & (ymm4 ^ ymm3))
 ; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $6, %ymm0, %ymm3
 ; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
 ; AVX512VL-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm2 & ymm7)
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm3 & ymm7)
 ; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsrlw $7, %ymm0, %ymm3
 ; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
-; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm2 & ymm8)
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm3 & ymm8)
 ; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: var_rotate_v64i8:
diff --git a/llvm/test/CodeGen/X86/vector-shift-lut.ll b/llvm/test/CodeGen/X86/vector-shift-lut.ll
index cb688b12aec95..44c28d67cf677 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lut.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lut.ll
@@ -1356,35 +1356,9 @@ define <32 x i8> @perlane_ashr_v32i8(<32 x i8> %a) nounwind {
 define <64 x i8> @perlane_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-LABEL: perlane_shl_v64i8:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-NEXT:    psllw $5, %xmm4
-; SSE2-NEXT:    pxor %xmm5, %xmm5
-; SSE2-NEXT:    pxor %xmm0, %xmm0
-; SSE2-NEXT:    pcmpgtb %xmm4, %xmm0
-; SSE2-NEXT:    movdqa %xmm0, %xmm3
-; SSE2-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    por %xmm3, %xmm0
-; SSE2-NEXT:    paddb %xmm4, %xmm4
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm4, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm7
-; SSE2-NEXT:    pandn %xmm0, %xmm7
-; SSE2-NEXT:    psllw $2, %xmm0
-; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT:    pand %xmm6, %xmm3
-; SSE2-NEXT:    pand %xmm0, %xmm3
-; SSE2-NEXT:    por %xmm7, %xmm3
-; SSE2-NEXT:    paddb %xmm4, %xmm4
-; SSE2-NEXT:    pxor %xmm0, %xmm0
-; SSE2-NEXT:    pcmpgtb %xmm4, %xmm0
-; SSE2-NEXT:    movdqa %xmm0, %xmm4
-; SSE2-NEXT:    pandn %xmm3, %xmm4
-; SSE2-NEXT:    paddb %xmm3, %xmm3
-; SSE2-NEXT:    pand %xmm0, %xmm3
-; SSE2-NEXT:    por %xmm4, %xmm3
 ; SSE2-NEXT:    psllw $5, %xmm1
+; SSE2-NEXT:    pxor %xmm5, %xmm5
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm0
 ; SSE2-NEXT:    movdqa %xmm0, %xmm4
@@ -1394,12 +1368,12 @@ define <64 x i8> @perlane_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm4
-; SSE2-NEXT:    movdqa %xmm4, %xmm7
-; SSE2-NEXT:    pandn %xmm0, %xmm7
+; SSE2-NEXT:    movdqa %xmm4, %xmm6
+; SSE2-NEXT:    pandn %xmm0, %xmm6
 ; SSE2-NEXT:    paddb %xmm0, %xmm0
 ; SSE2-NEXT:    paddb %xmm0, %xmm0
 ; SSE2-NEXT:    pand %xmm4, %xmm0
-; SSE2-NEXT:    por %xmm7, %xmm0
+; SSE2-NEXT:    por %xmm6, %xmm0
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm4
@@ -1408,32 +1382,58 @@ define <64 x i8> @perlane_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    paddb %xmm0, %xmm0
 ; SSE2-NEXT:    pand %xmm4, %xmm0
 ; SSE2-NEXT:    por %xmm1, %xmm0
-; SSE2-NEXT:    psllw $5, %xmm2
-; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; SSE2-NEXT:    pand %xmm1, %xmm7
-; SSE2-NEXT:    paddb %xmm1, %xmm7
-; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT:    psubb %xmm1, %xmm7
-; SSE2-NEXT:    paddb %xmm2, %xmm2
+; SSE2-NEXT:    psllw $5, %xmm3
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm1
+; SSE2-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; SSE2-NEXT:    por %xmm1, %xmm6
+; SSE2-NEXT:    paddb %xmm3, %xmm3
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
-; SSE2-NEXT:    pcmpgtb %xmm2, %xmm4
-; SSE2-NEXT:    movdqa %xmm4, %xmm1
-; SSE2-NEXT:    pandn %xmm7, %xmm1
-; SSE2-NEXT:    psllw $2, %xmm7
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm7
+; SSE2-NEXT:    pandn %xmm6, %xmm7
+; SSE2-NEXT:    psllw $2, %xmm6
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT:    pand %xmm1, %xmm4
 ; SSE2-NEXT:    pand %xmm6, %xmm4
-; SSE2-NEXT:    pand %xmm7, %xmm4
-; SSE2-NEXT:    por %xmm1, %xmm4
+; SSE2-NEXT:    por %xmm7, %xmm4
+; SSE2-NEXT:    paddb %xmm3, %xmm3
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm3
+; SSE2-NEXT:    pandn %xmm4, %xmm3
+; SSE2-NEXT:    paddb %xmm4, %xmm4
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    por %xmm3, %xmm4
+; SSE2-NEXT:    psllw $5, %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpgtb %xmm2, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; SSE2-NEXT:    pand %xmm3, %xmm6
+; SSE2-NEXT:    paddb %xmm3, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT:    psubb %xmm3, %xmm6
+; SSE2-NEXT:    paddb %xmm2, %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm3
+; SSE2-NEXT:    pcmpgtb %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm7
+; SSE2-NEXT:    pandn %xmm6, %xmm7
+; SSE2-NEXT:    psllw $2, %xmm6
+; SSE2-NEXT:    pand %xmm1, %xmm3
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    por %xmm7, %xmm3
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm5
 ; SSE2-NEXT:    movdqa %xmm5, %xmm1
-; SSE2-NEXT:    pandn %xmm4, %xmm1
-; SSE2-NEXT:    paddb %xmm4, %xmm4
-; SSE2-NEXT:    pand %xmm5, %xmm4
-; SSE2-NEXT:    por %xmm1, %xmm4
+; SSE2-NEXT:    pandn %xmm3, %xmm1
+; SSE2-NEXT:    paddb %xmm3, %xmm3
+; SSE2-NEXT:    pand %xmm5, %xmm3
+; SSE2-NEXT:    por %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
-; SSE2-NEXT:    movdqa %xmm4, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm2
+; SSE2-NEXT:    movdqa %xmm4, %xmm3
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: perlane_shl_v64i8:
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
index 9a31f37cd6413..6eb853573f371 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -294,32 +294,32 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
 define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
 ; AVX1-LABEL: var_shift_v32i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpsllw $4, %xmm3, %xmm4
-; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm2, %xmm3
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm4
-; AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
+; AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm4
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
+; AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_shift_v32i8:
@@ -403,32 +403,32 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
 ;
 ; X86-AVX1-LABEL: var_shift_v32i8:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vbroadcastss {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; X86-AVX1-NEXT:    vpsllw $4, %xmm3, %xmm4
-; X86-AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm4
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X86-AVX1-NEXT:    vpsllw $4, %xmm2, %xmm3
+; X86-AVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; X86-AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; X86-AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
-; X86-AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
-; X86-AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm4
-; X86-AVX1-NEXT:    vpaddb %xmm4, %xmm4, %xmm4
+; X86-AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
+; X86-AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
 ; X86-AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; X86-AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
-; X86-AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm4
+; X86-AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
 ; X86-AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
-; X86-AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm3, %xmm3
-; X86-AVX1-NEXT:    vpsllw $4, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; X86-AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT:    vpsllw $4, %xmm0, %xmm3
+; X86-AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
 ; X86-AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
-; X86-AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; X86-AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
+; X86-AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
 ; X86-AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; X86-AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; X86-AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
 ; X86-AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
-; X86-AVX1-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; X86-AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: var_shift_v32i8:
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-512.ll b/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
index 7f59a33e0ee78..8bdb062b42877 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
@@ -51,32 +51,32 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
 define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
 ; AVX512DQ-LABEL: var_shift_v64i8:
 ; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpbroadcastb {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
-; AVX512DQ-NEXT:    vpsllw $4, %ymm3, %ymm4
-; AVX512DQ-NEXT:    vpand %ymm2, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512DQ-NEXT:    vpsllw $4, %ymm2, %ymm3
+; AVX512DQ-NEXT:    vpbroadcastb {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT:    vpand %ymm4, %ymm3, %ymm3
 ; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm1, %ymm5
 ; AVX512DQ-NEXT:    vpsllw $5, %ymm5, %ymm5
-; AVX512DQ-NEXT:    vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm4
-; AVX512DQ-NEXT:    vpaddb %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm5, %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
+; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
 ; AVX512DQ-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
-; AVX512DQ-NEXT:    vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm5, %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
 ; AVX512DQ-NEXT:    vpaddb %ymm5, %ymm5, %ymm5
-; AVX512DQ-NEXT:    vpblendvb %ymm5, %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512DQ-NEXT:    vpand %ymm2, %ymm4, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm5, %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vpand %ymm4, %ymm3, %ymm3
 ; AVX512DQ-NEXT:    vpsllw $5, %ymm1, %ymm1
-; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
 ; AVX512DQ-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
 ; AVX512DQ-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    retq
 ;
 ; AVX512BW-LABEL: var_shift_v64i8:


